* Copyright (C) 2009-2012 Cray, Inc.
*
* Derived from work by Eric Barton <eric@bartonsoftware.com>
+ * Author: James Shimek <jshimek@cray.com>
* Author: Nic Henke <nic@cray.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
*/
+#include <asm/page.h>
#include <linux/nmi.h>
+#include <linux/pagemap.h>
+
+#include <libcfs/linux/linux-mem.h>
+
#include "gnilnd.h"
/* this is useful when needed to debug wire corruption. */
if (!already_live) {
wake_up_all(&dev->gnd_waitq);
}
- return;
}
void kgnilnd_schedule_device_timer(unsigned long arg)
* == 0: reschedule if someone marked him WANTS_SCHED
* > 0 : force a reschedule */
/* Return code 0 means it did not schedule the conn, 1
- * means it succesfully scheduled the conn.
+ * means it successfully scheduled the conn.
*/
int
* as scheduled */
int
-_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld)
+_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held)
{
kgn_device_t *dev = conn->gnc_device;
int sched;
conn, sched);
CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
-
- spin_lock(&dev->gnd_lock);
+ if (!lock_held)
+ spin_lock(&dev->gnd_lock);
list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
- spin_unlock(&dev->gnd_lock);
+ if (!lock_held)
+ spin_unlock(&dev->gnd_lock);
set_mb(conn->gnc_last_sched_ask, jiffies);
rc = 1;
} else {
/* make sure thread(s) are going to process conns - but let them make
* a separate decision from conn schedule */
+ if (!lock_held)
+ kgnilnd_schedule_device(dev);
+ return rc;
+}
+
+int
+kgnilnd_schedule_delay_conn(kgn_conn_t *conn)
+{
+ kgn_device_t *dev = conn->gnc_device;
+ int rc = 0;
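+
+ /* park the conn on the device delay queue if it isn't already there;
+ * the scheduler moves delayed conns back to gnd_ready_conns once
+ * send-CQ events indicate SMSG credits have been released */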
+ spin_lock(&dev->gnd_lock);
+ if (list_empty(&conn->gnc_delaylist)) {
+ list_add_tail(&conn->gnc_delaylist, &dev->gnd_delay_conns);
+ rc = 1;
+ }
+ spin_unlock(&dev->gnd_lock);
+
kgnilnd_schedule_device(dev);
return rc;
}
/* Only free the buffer if we used it */
if (tx->tx_buffer_copy != NULL) {
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
tx->tx_buffer_copy = NULL;
CDEBUG(D_MALLOC, "vfreed buffer2\n");
}
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
return tx;
- tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
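+ /* use the zeroing allocator so the tx needs no explicit memset */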
+ tx = kmem_cache_zalloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
if (tx == NULL) {
CERROR("failed to allocate tx\n");
return NULL;
CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
sizeof(*tx), tx);
- /* need this memset, cache alloc'd memory is not cleared */
- memset(tx, 0, sizeof(*tx));
-
/* setup everything here to minimize time under the lock */
tx->tx_buftype = GNILND_BUF_NONE;
tx->tx_msg.gnm_type = GNILND_MSG_NONE;
#define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
/* we don't use offset as every one is passing a buffer reference that already
- * includes the offset into the base address -
- * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
+ * includes the offset into the base address.
+ */
static inline __u16
kgnilnd_cksum(void *ptr, size_t nob)
{
return sum;
}
-inline __u16
-kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int nob, int dump_blob)
+__u16
+kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov,
+ unsigned int offset, unsigned int nob, int dump_blob)
{
__wsum cksum = 0;
__wsum tmpck;
/* if loops changes, please change kgnilnd_setup_phys_buffer */
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
}
- /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */
- odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
+ /* ignore nob here, if nob < (bv_len - offset), kiov == 1 */
+ odd = (unsigned long) (kiov[0].bv_len - offset) & 1;
if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
- odd, kiov[0].kiov_len, offset, nob);
+ odd, kiov[0].bv_len, offset, nob);
for (i = 0; i < nkiov; i++) {
- pages[i] = kiov[i].kiov_page;
+ pages[i] = kiov[i].bv_page;
}
addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
}
atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
- tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
+ tmpck = _kgnilnd_cksum(0, ((void *) addr + kiov[0].bv_offset +
+ offset), nob);
cksum = tmpck;
if (dump_blob) {
kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
- (void *)addr + kiov[0].kiov_offset + offset, nob);
+ (void *)addr + kiov[0].bv_offset +
+ offset, nob);
}
CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
- cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
+ cksum, tmpck, addr, kiov[0].bv_offset, nob, offset);
vunmap(addr);
} else {
do {
- fraglen = min(kiov->kiov_len - offset, nob);
+ fraglen = min(kiov->bv_len - offset, nob);
/* make dang sure we don't send a bogus checksum if somehow we get
* an odd length fragment on anything but the last entry in a kiov -
* we know from kgnilnd_setup_rdma_buffer that we can't have non
* PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
- "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
- fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
+ "odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u kiov 0x%p\n",
+ fraglen, nkiov, nob, kiov->bv_len,
+ offset, kiov);
- addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
+ addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset +
+ offset;
tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
CDEBUG(D_BUFFS,
"cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
- cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
- fraglen, offset);
+ cksum, tmpck, kiov->bv_page, kiov->bv_offset,
+ addr, fraglen, offset);
cksum = tmpck;
if (dump_blob)
kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
- kunmap(kiov->kiov_page);
+ kunmap(kiov->bv_page);
kiov++;
nkiov--;
LBUG();
}
/* only allow NAK on error and truncate to zero */
- LASSERTF(error <= 0, "error %d conn 0x%p, cookie "LPU64"\n",
+ LASSERTF(error <= 0, "error %d conn 0x%p, cookie %llu\n",
error, conn, cookie);
tx = kgnilnd_new_tx_msg(nak_type, source);
kgnilnd_queue_tx(conn, tx);
}
-int
-kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
- lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
-
+static int
+kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
+ struct bio_vec *kiov,
+ unsigned int offset, unsigned int nob)
{
kgn_msg_t *msg = &tx->tx_msg;
int i;
if (nob == 0) {
tx->tx_buffer = NULL;
- } else if (kiov != NULL) {
+ } else {
+
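+ /* LNet can hand us more kiov entries than the payload needs;
+ * clamp niov to the pages actually spanned by nob plus the
+ * leading offsets */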
+ if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
+ niov = round_up(nob + offset + kiov->bv_offset,
+ PAGE_SIZE) / PAGE_SIZE;
+ }
+
LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
- "bad niov %d\n", niov);
+ "bad niov %d msg %p kiov %p offset %d nob%d\n",
+ niov, msg, kiov, offset, nob);
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
niov--;
kiov++;
LASSERT(niov > 0);
}
for (i = 0; i < niov; i++) {
- /* We can't have a kiov_offset on anything but the first entry,
- * otherwise we'll have a hole at the end of the mapping as we only map
- * whole pages.
- * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
- * than kiov_len, we will also have a whole at the end of that page
- * which isn't allowed */
- if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
- CNETERR("Can't make payload contiguous in I/O VM:"
- "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
- i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
+ /* We can't have a bv_offset on anything but the first
+ * entry, otherwise we'll have a hole at the end of the
+ * mapping as we only map whole pages.
+ * Also, if we have a bv_len < PAGE_SIZE but we need to
+ * map more than bv_len, we will also have a hole at
+ * the end of that page which isn't allowed
+ */
+ if ((kiov[i].bv_offset != 0 && i > 0) ||
+ (kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE &&
+ i < niov - 1)) {
+ CNETERR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
+ i, offset, nob, kiov->bv_offset,
+ kiov->bv_len);
RETURN(-EINVAL);
}
- tx->tx_imm_pages[i] = kiov[i].kiov_page;
+ tx->tx_imm_pages[i] = kiov[i].bv_page;
}
/* hijack tx_phys for the later unmap */
if (niov == 1) {
/* tx->phyx being equal to NULL is the signal for unmap to discern between kmap and vmap */
tx->tx_phys = NULL;
- tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
+ tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) +
+ kiov[0].bv_offset + offset;
atomic_inc(&kgnilnd_data.kgn_nkmap_short);
GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
nob, kiov, tx->tx_buffer);
}
atomic_inc(&kgnilnd_data.kgn_nvmap_short);
- /* make sure we take into account the kiov offset as the start of the buffer */
- tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
- GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
- niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
+ /* make sure we take into account the kiov offset as the
+ * start of the buffer
+ */
+ tx->tx_buffer = (void *)tx->tx_phys + kiov[0].bv_offset
+ + offset;
+ GNIDBG_TX(D_NET, tx,
+ "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
+ niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
}
tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
tx->tx_nob = nob;
- } else {
- /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
- * could "flatten" the payload into a single contiguous buffer ready
- * for sending direct over an FMA if we ever needed to. */
-
- LASSERT(niov > 0);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- if (nob > iov->iov_len - offset) {
- CERROR("Can't handle multiple vaddr fragments\n");
- return -EMSGSIZE;
- }
-
- tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
-
- tx->tx_buftype = GNILND_BUF_IMMEDIATE;
- tx->tx_nob = nob;
}
/* checksum payload early - it shouldn't be changing after lnd_send */
}
int
-kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
- unsigned int niov, struct iovec *iov,
- unsigned int offset, unsigned int nob)
-
-{
- LASSERT(nob > 0);
- LASSERT(niov > 0);
- LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- if (nob > iov->iov_len - offset) {
- CERROR("Can't handle multiple vaddr fragments\n");
- return -EMSGSIZE;
- }
-
- tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
- tx->tx_nob = nob;
- tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
- return 0;
-}
-
-int
-kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
+kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
gni_mem_segment_t *phys;
/* if loops changes, please change kgnilnd_cksum_kiov
* and kgnilnd_setup_immediate_buffer */
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
tx->tx_nob = nob;
- /* kiov_offset is start of 'valid' buffer, so index offset past that */
- tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
+ /* bv_offset is start of 'valid' buffer, so index offset past that */
+ tx->tx_buffer = (void *)((unsigned long)(kiov->bv_offset + offset));
phys = tx->tx_phys;
CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
- tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
+ tx, tx->tx_buffer, kiov, kiov->bv_offset, nkiov, offset);
do {
- fraglen = min(kiov->kiov_len - offset, nob);
-
- /* We can't have a kiov_offset on anything but the first entry,
- * otherwise we'll have a hole at the end of the mapping as we only map
- * whole pages. Only the first page is allowed to have an offset -
- * we'll add that into tx->tx_buffer and that will get used when we
- * map in the segments (see kgnilnd_map_buffer).
- * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
- * than kiov_len, we will also have a whole at the end of that page
- * which isn't allowed */
+ fraglen = min(kiov->bv_len - offset, nob);
+
+ /* We can't have a bv_offset on anything but the first entry,
+ * otherwise we'll have a hole at the end of the mapping as we
+ * only map whole pages. Only the first page is allowed to
+ * have an offset - we'll add that into tx->tx_buffer and that
+ * will get used when we map in the segments (see
+ * kgnilnd_map_buffer). Also, if we have a bv_len < PAGE_SIZE
+ * but we need to map more than bv_len, we will also have a
+ * hole at the end of that page which isn't allowed
+ */
if ((phys != tx->tx_phys) &&
- ((kiov->kiov_offset != 0) ||
- ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
- CERROR("Can't make payload contiguous in I/O VM:"
- "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
+ ((kiov->bv_offset != 0) ||
+ ((kiov->bv_len < PAGE_SIZE) && (nob > kiov->bv_len)))) {
+ CERROR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
(int)(phys - tx->tx_phys),
- offset, nob, kiov->kiov_offset, kiov->kiov_len);
+ offset, nob, kiov->bv_offset, kiov->bv_len);
rc = -EINVAL;
GOTO(error, rc);
}
GOTO(error, rc);
}
- CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
- "nkiov %u offset %u\n",
- kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
+ CDEBUG(D_BUFFS,
+ "page 0x%p bv_offset %u bv_len %u nob %u nkiov %u offset %u\n",
+ kiov->bv_page, kiov->bv_offset, kiov->bv_len, nob, nkiov,
+ offset);
- phys->address = lnet_page2phys(kiov->kiov_page);
+ phys->address = page_to_phys(kiov->bv_page);
phys++;
kiov++;
nkiov--;
static inline int
kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
- int rc;
-
- LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
- " offset %d, nob %d, niov %d\n"
- , iov, kiov, tx, offset, nob, niov);
-
- if (kiov != NULL) {
- rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
- } else {
- rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
- }
- return rc;
+ return kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
}
/* kgnilnd_parse_lnet_rdma()
* transfer.
*/
static void
-kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
+kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov,
unsigned int *offset, unsigned int *nob,
- lnet_kiov_t **kiov, int put_len)
+ struct bio_vec **kiov, int put_len)
{
/* GETs are weird, see kgnilnd_send */
if (lntmsg->msg_type == LNET_MSG_GET) {
if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
*kiov = NULL;
} else {
- *kiov = lntmsg->msg_md->md_iov.kiov;
+ *kiov = lntmsg->msg_md->md_kiov;
}
*niov = lntmsg->msg_md->md_niov;
*nob = lntmsg->msg_md->md_length;
static inline void
kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
{
- unsigned int niov, offset, nob;
- lnet_kiov_t *kiov;
- lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
- int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
+ unsigned int niov, offset, nob;
+ struct bio_vec *kiov;
+ struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
+ int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
(tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
int rc = 0;
__u16 cksum;
unsigned int niov, offset, nob;
- lnet_kiov_t *kiov;
- lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
+ struct bio_vec *kiov;
+ struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
/* we can only match certain requests */
if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
- GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to "LPD64"",
+ GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %lld",
bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
"bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
- GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to "LPD64"",
+ GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
* GART resource, etc starvation handling */
if (rrc != GNI_RC_SUCCESS) {
GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
- "phys %u pp %u, virt %u nob "LPU64"",
+ "phys %u pp %u, virt %u nob %llu",
tx->tx_phys_npages, dev->gnd_id,
dev->gnd_map_nphys, dev->gnd_map_physnop,
dev->gnd_map_nvirt, dev->gnd_map_virtnob);
NULL, flags, &tx->tx_map_key);
if (rrc != GNI_RC_SUCCESS) {
GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
- "phys %u pp %u, virt %u nob "LPU64"",
+ "phys %u pp %u, virt %u nob %llu",
tx->tx_nob, dev->gnd_id,
dev->gnd_map_nphys, dev->gnd_map_physnop,
dev->gnd_map_nvirt, dev->gnd_map_virtnob);
int hold_timeout = 0;
/* code below relies on +1 relationship ... */
- CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
- CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
+ BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
+ (GNILND_BUF_PHYS_UNMAPPED + 1));
+ BUILD_BUG_ON(GNILND_BUF_VIRT_MAPPED !=
+ (GNILND_BUF_VIRT_UNMAPPED + 1));
switch (tx->tx_buftype) {
default:
* verified peer notification - the theory is that
* a TX error can be communicated in all other cases */
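+ /* a tx failed with -GNILND_NOPURG was already NAKed back to the
+ * peer, so skip the purgatory hold for it */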
if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
+ error != -GNILND_NOPURG &&
kgnilnd_check_purgatory_conn(tx->tx_conn)) {
kgnilnd_add_purgatory_tx(tx);
hold_timeout = GNILND_TIMEOUT2DEADMAN;
GNIDBG_TX(D_NET, tx,
- "dev %p delaying MDD release for %dms key "LPX64"."LPX64"",
+ "dev %p delaying MDD release for %dms key %#llx.%#llx",
tx->tx_conn->gnc_device, hold_timeout,
tx->tx_map_key.qword1, tx->tx_map_key.qword2);
}
void
kgnilnd_tx_done(kgn_tx_t *tx, int completion)
{
- lnet_msg_t *lntmsg0, *lntmsg1;
+ struct lnet_msg *lntmsg0, *lntmsg1;
int status0, status1;
- lnet_ni_t *ni = NULL;
+ struct lnet_ni *ni = NULL;
kgn_conn_t *conn = tx->tx_conn;
LASSERT(!in_interrupt());
* could free up lnet credits, resulting in a call chain back into
* the LND via kgnilnd_send and friends */
- lnet_finalize(ni, lntmsg0, status0);
+ lnet_finalize(lntmsg0, status0);
if (lntmsg1 != NULL) {
- lnet_finalize(ni, lntmsg1, status1);
+ lnet_finalize(lntmsg1, status1);
}
}
* if we are sending to the same node faster than 256000/sec.
* To help guard against this, we OR in the tx_seq - that is 32 bits */
- tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
+ tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
return 0;
}
-static inline int
-kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx)
+static inline void
+kgnilnd_tx_log_retrans(kgn_conn_t *conn, kgn_tx_t *tx)
{
- int max_retrans = *kgnilnd_tunables.kgn_max_retransmits;
int log_retrans;
- int log_retrans_level;
- /* I need kgni credits to send this. Replace tx at the head of the
- * fmaq and I'll get rescheduled when credits appear */
- tx->tx_state = 0;
- tx->tx_retrans++;
- conn->gnc_tx_retrans++;
- log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0) ||
- (tx->tx_retrans > (max_retrans / 2)));
- log_retrans_level = tx->tx_retrans < (max_retrans / 2) ? D_NET : D_NETERROR;
-
- /* Decision time - either error, warn or just retransmit */
+ log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0));
/* we don't care about TX timeout - it could be that the network is slower
* or throttled. We'll keep retransmitting - so if the network is so slow
* that we fill up our mailbox, we'll keep trying to resend that msg
* until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
* that the peer hasn't sent us any traffic in return */
-
- if (tx->tx_retrans > max_retrans) {
- /* this means we are not backing off the retransmits
- * in a healthy manner and are likely chewing up the
- * CPU cycles quite badly */
- GNIDBG_TOMSG(D_ERROR, &tx->tx_msg,
- "SOFTWARE BUG: too many retransmits (%d) for tx id %x "
- "conn 0x%p->%s\n",
- tx->tx_retrans, tx->tx_id, conn,
- libcfs_nid2str(conn->gnc_peer->gnp_nid));
-
- /* yes - double errors to help debug this condition */
- GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg, "connection dead. "
- "unable to send to %s for %lu secs (%d tries)",
- libcfs_nid2str(tx->tx_conn->gnc_peer->gnp_nid),
- cfs_duration_sec(jiffies - tx->tx_cred_wait),
- tx->tx_retrans);
-
- kgnilnd_close_conn(conn, -ETIMEDOUT);
-
- /* caller should terminate */
- RETURN(0);
- } else {
- /* some reasonable throttling of the debug message */
- if (log_retrans) {
- unsigned long now = jiffies;
- /* XXX Nic: Mystical TX debug here... */
- GNIDBG_SMSG_CREDS(log_retrans_level, conn);
- GNIDBG_TOMSG(log_retrans_level, &tx->tx_msg,
- "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
- " last_msg %uus/%uus last_cq %uus/%uus",
- conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
- tx->tx_id, tx->tx_retrans,
- jiffies_to_usecs(now - tx->tx_cred_wait),
- jiffies_to_usecs(now - conn->gnc_last_tx),
- jiffies_to_usecs(now - conn->gnc_last_rx),
- jiffies_to_usecs(now - conn->gnc_last_tx_cq),
- jiffies_to_usecs(now - conn->gnc_last_rx_cq));
- }
- /* caller should retry */
- RETURN(1);
+
+ /* some reasonable throttling of the debug message */
+ if (log_retrans) {
+ unsigned long now = jiffies;
+ /* XXX Nic: Mystical TX debug here... */
+ /* We expect retransmissions so only log when D_NET is enabled */
+ GNIDBG_SMSG_CREDS(D_NET, conn);
+ GNIDBG_TOMSG(D_NET, &tx->tx_msg,
+ "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
+ " last_msg %uus/%uus last_cq %uus/%uus",
+ conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
+ tx->tx_id, tx->tx_retrans,
+ jiffies_to_usecs(now - tx->tx_cred_wait),
+ jiffies_to_usecs(now - conn->gnc_last_tx),
+ jiffies_to_usecs(now - conn->gnc_last_rx),
+ jiffies_to_usecs(now - conn->gnc_last_tx_cq),
+ jiffies_to_usecs(now - conn->gnc_last_rx_cq));
}
}
{
kgn_conn_t *conn = tx->tx_conn;
kgn_msg_t *msg = &tx->tx_msg;
- int retry_send;
gni_return_t rrc;
unsigned long newest_last_rx, timeout;
unsigned long now;
* close message.
*/
if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
/* Return -ETIME, we are closing the connection already so we don't want to
* have this tx hit the wire. The tx will be killed by the calling function.
* Once the EP is marked dirty the close message will be the last
}
if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
- GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Cant send to %s after timeout lapse of %lu; TO %lu",
+ GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
+ "Cant send to %s after timeout lapse of %lu; TO %lu\n",
libcfs_nid2str(conn->gnc_peer->gnp_nid),
cfs_duration_sec(now - newest_last_rx),
cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
return -ETIME;
}
*/
msg->gnm_connstamp = conn->gnc_my_connstamp;
msg->gnm_payload_len = immediatenob;
- msg->gnm_seq = conn->gnc_tx_seq;
+ msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
/* always init here - kgn_checksum is a /sys module tunable
* and can be flipped at any point, even between msg init and sending */
if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
} else {
- rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
- msg, sizeof(*msg), immediate, immediatenob,
- tx->tx_id.txe_smsg_id);
+ rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
+ msg, sizeof(*msg), immediate,
+ immediatenob,
+ tx->tx_id.txe_smsg_id);
}
switch (rrc) {
case GNI_RC_SUCCESS:
- conn->gnc_tx_seq++;
+ atomic_inc(&conn->gnc_tx_seq);
conn->gnc_last_tx = jiffies;
/* no locking here as LIVE isn't a list */
kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
/* serialize with seeing CQ events for completion on this, as well as
* tx_seq */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
atomic_inc(&conn->gnc_device->gnd_short_ntx);
atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
return 0;
case GNI_RC_NOT_DONE:
- /* XXX Nic: We need to figure out how to track this
- * - there are bound to be good reasons for it,
- * but we want to know when it happens */
-
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ /* Jshimek: We can get GNI_RC_NOT_DONE for 3 reasons currently
+ * 1: out of mbox credits
+ * 2: out of mbox payload credits
+ * 3: On Aries out of dla credits
+ */
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
/* We'll handle this error inline - makes the calling logic much
* cleaner */
return -EAGAIN;
}
- retry_send = kgnilnd_tx_should_retry(conn, tx);
- if (retry_send) {
- /* add to head of list for the state and retries */
- spin_lock(state_lock);
- kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
- spin_unlock(state_lock);
-
- /* We only reschedule for a certain number of retries, then
- * we will wait for the CQ events indicating a release of SMSG
- * credits */
- if (tx->tx_retrans < (*kgnilnd_tunables.kgn_max_retransmits/4)) {
- kgnilnd_schedule_conn(conn);
- return 0;
- } else {
- /* CQ event coming in signifies either TX completed or
- * RX receive. Either of these *could* free up credits
- * in the SMSG mbox and we should try sending again */
- GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
- tx->tx_conn->gnc_cqid);
- /* use +ve return code to let upper layers know they
- * should stop looping on sends */
- return EAGAIN;
- }
+ /* I need kgni credits to send this. Replace tx at the head of the
+ * fmaq and I'll get rescheduled when credits appear. Reset the tx_state
+ * and bump retrans counts since we are requeueing the tx.
+ */
+ tx->tx_state = 0;
+ tx->tx_retrans++;
+ conn->gnc_tx_retrans++;
+
+ kgnilnd_tx_log_retrans(conn, tx);
+ /* add to head of list for the state and retries */
+ spin_lock(state_lock);
+ kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
+ spin_unlock(state_lock);
+
+ /* We only reschedule for a certain number of retries, then
+ * we will wait for the CQ events indicating a release of SMSG
+ * credits */
+ if (tx->tx_retrans < *kgnilnd_tunables.kgn_max_retransmits) {
+ kgnilnd_schedule_conn(conn);
+ return 0;
} else {
- return -EAGAIN;
+ /* CQ event coming in signifies either TX completed or
+ * RX receive. Either of these *could* free up credits
+ * in the SMSG mbox and we should try sending again */
+ GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
+ tx->tx_conn->gnc_cqid);
+ kgnilnd_schedule_delay_conn(conn);
+ /* use +ve return code to let upper layers know they
+ * should stop looping on sends */
+ return EAGAIN;
}
default:
/* handle bad retcode gracefully */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
return -EIO;
}
}
int rc;
timestamp = jiffies;
- mutex_lock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
+ kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times.
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
rc = 0;
} else {
atomic_inc(&conn->gnc_device->gnd_fast_try);
- rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
+ rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
+ &conn->gnc_smsg_mutex);
}
if (!rc) {
rc = -EAGAIN;
}
/* lets us know if we can push this RDMA through now */
-inline int
+static int
kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
{
long bytes_left;
}
void
-kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
+kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_process_id *target)
{
kgn_peer_t *peer;
kgn_peer_t *new_peer = NULL;
}
/* don't create a connection if the peer is marked down */
- if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+ if (peer->gnp_state != GNILND_PEER_UP) {
read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
rc = -ENETRESET;
GOTO(no_peer, rc);
kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
/* don't create a connection if the peer is not up */
- if (peer->gnp_down != GNILND_RCA_NODE_UP) {
+ if (peer->gnp_state != GNILND_PEER_UP) {
write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
rc = -ENETRESET;
GOTO(no_peer, rc);
RETURN_EXIT;
}
-void
+int
kgnilnd_rdma(kgn_tx_t *tx, int type,
kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
{
tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
if (tx->tx_offset)
- kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset);
+ atomic_inc(&kgnilnd_data.kgn_rev_offset);
if ((nob + tx->tx_offset) & 3) {
desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
- kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length);
+ atomic_inc(&kgnilnd_data.kgn_rev_length);
} else {
desc_nob = (nob + tx->tx_offset);
}
if (tx->tx_buffer_copy == NULL) {
/* Allocate the largest copy buffer we will need, this will prevent us from overwriting data
* and require at most we allocate a few extra bytes. */
- tx->tx_buffer_copy = vmalloc(desc_nob);
+ tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob);
if (!tx->tx_buffer_copy) {
/* allocation of buffer failed nak the rdma */
kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
kgnilnd_tx_done(tx, -EFAULT);
- return;
+ return 0;
}
- kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
+ atomic_inc(&kgnilnd_data.kgn_rev_copy_buff);
rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
if (rc != GNI_RC_SUCCESS) {
/* Registration Failed nak rdma and kill the tx. */
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy,
+ desc_nob);
tx->tx_buffer_copy = NULL;
kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
kgnilnd_tx_done(tx, -EFAULT);
- return;
+ return 0;
}
}
desc_map_key = tx->tx_buffer_copy_map_key;
tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
tx->tx_rdma_desc.length = desc_nob;
tx->tx_nob_rdma = nob;
- if (*kgnilnd_tunables.kgn_bte_dlvr_mode)
- tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_dlvr_mode;
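+ /* separate tunables let PUT and GET posts use different BTE
+ * delivery modes */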
+ if (post_type == GNI_POST_RDMA_PUT && *kgnilnd_tunables.kgn_bte_put_dlvr_mode)
+ tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_put_dlvr_mode;
+ if (post_type == GNI_POST_RDMA_GET && *kgnilnd_tunables.kgn_bte_get_dlvr_mode)
+ tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_get_dlvr_mode;
/* prep final completion message */
kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
if (nob == 0) {
kgnilnd_queue_tx(conn, tx);
- return;
+ return 0;
}
/* Don't lie (CLOSE == RDMA idle) */
LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
tx, conn, conn->gnc_close_sent);
- GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x dlvr_mode 0x%x cookie:"LPX64,
- type, tx->tx_rdma_desc.dlvr_mode, cookie);
+ GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
+ "0x%x cookie:%#llx",
+ type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
/* set CQ dedicated for RDMA */
tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times.
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
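+ /* GNI_RC_ERROR_RESOURCE: the post couldn't be accepted right now;
+ * unmap the buffer and park the tx back on the MAPQ so the device
+ * scheduler retries the RDMA later */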
+ if (rrc == GNI_RC_ERROR_RESOURCE) {
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_unmap_buffer(tx, 0);
+
+ if (tx->tx_buffer_copy != NULL) {
+ kgnilnd_vfree(tx->tx_buffer_copy, desc_nob);
+ tx->tx_buffer_copy = NULL;
+ }
+
+ spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
+ kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
+ GNILND_TX_MAPQ, 0);
+ spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
+ kgnilnd_schedule_device(tx->tx_conn->gnc_device);
+ return -EAGAIN;
+ }
+
spin_lock(&conn->gnc_list_lock);
kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
tx->tx_qtime = jiffies;
spin_unlock(&conn->gnc_list_lock);
-
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
/* XXX Nic: is this a place we should handle more errors for
* robustness sake */
LASSERT(rrc == GNI_RC_SUCCESS);
-
+ return 0;
}
kgn_rx_t *
CDEBUG(D_NET, "consuming %p\n", conn);
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times.
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
GNIDBG_SMSG_CREDS(D_NET, conn);
- return;
+ kgnilnd_schedule_conn(conn);
}
void
kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
sizeof(*rx), rx);
-
- return;
}
int
-kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
- lnet_hdr_t *hdr = &lntmsg->msg_hdr;
+ struct lnet_hdr *hdr = &lntmsg->msg_hdr;
int type = lntmsg->msg_type;
- lnet_process_id_t target = lntmsg->msg_target;
+ struct lnet_process_id target = lntmsg->msg_target;
int target_is_router = lntmsg->msg_target_is_router;
int routing = lntmsg->msg_routing;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
unsigned int msg_vmflush = lntmsg->msg_vmflush;
kgn_net_t *net = ni->ni_data;
kgn_tx_t *tx;
int rc = 0;
- int mpflag = 0;
+ /* '1' for consistency with code that checks !mpflag to restore */
+ unsigned int mpflag = 1;
int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
/* NB 'private' is different depending on what we're sending.... */
LASSERTF(niov <= LNET_MAX_IOV,
"lntmsg %p niov %d\n", lntmsg, niov);
- /* payload is either all vaddrs or all pages */
- LASSERTF(!(kiov != NULL && iov != NULL),
- "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov);
-
if (msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
+ mpflag = memalloc_noreclaim_save();
switch (type) {
default:
rc = -ENOMEM;
goto out;
}
- /* slightly different options as we might actually have a GET with a
- * MD_KIOV set but a non-NULL md_iov.iov */
- if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
- rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.iov, NULL,
- 0, lntmsg->msg_md->md_length);
- else
- rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
- NULL, lntmsg->msg_md->md_iov.kiov,
- 0, lntmsg->msg_md->md_length);
+ rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
+ lntmsg->msg_md->md_kiov,
+ 0, lntmsg->msg_md->md_length);
if (rc != 0) {
CERROR("unable to setup buffer: %d\n", rc);
kgnilnd_tx_done(tx, rc);
goto out;
}
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
+ kiov, offset, nob);
if (rc != 0) {
kgnilnd_tx_done(tx, rc);
rc = -EIO;
goto out;
}
- rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob);
+ rc = kgnilnd_setup_immediate_buffer(tx, niov, NULL, kiov, offset, nob);
if (rc != 0) {
kgnilnd_tx_done(tx, rc);
goto out;
out:
/* use stored value as we could have already finalized lntmsg here from a failed launch */
if (msg_vmflush)
- cfs_memory_pressure_restore(mpflag);
+ memalloc_noreclaim_restore(mpflag);
return rc;
}
void
-kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
+kgnilnd_setup_rdma(struct lnet_ni *ni, kgn_rx_t *rx, struct lnet_msg *lntmsg, int mlen)
{
kgn_conn_t *conn = rx->grx_conn;
kgn_msg_t *rxmsg = rx->grx_msg;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
int done_type;
if (rc != 0)
goto failed_1;
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov, kiov, offset, nob);
if (rc != 0)
goto failed_1;
kgnilnd_tx_done(tx, rc);
kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
failed_0:
- lnet_finalize(ni, lntmsg, rc);
+ lnet_finalize(lntmsg, rc);
}
int
-kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+kgnilnd_eager_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
void **new_private)
{
kgn_rx_t *rx = private;
CERROR("Couldnt find matching peer %p or conn %p / %p\n",
peer, conn, found_conn);
if (found_conn) {
- CERROR("Unexpected connstamp "LPX64"("LPX64" expected)"
+ CERROR("Unexpected connstamp %#llx(%#llx expected)"
" from %s", rxmsg->gnm_connstamp,
found_conn->gnc_peer_connstamp,
libcfs_nid2str(peer->gnp_nid));
}
int
-kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kgn_rx_t *rx = private;
LASSERT(!in_interrupt());
LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
- /* Either all pages or all vaddrs */
- LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
- kiov, iov);
GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
- " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
+ " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
conn, rxmsg, lntmsg,
- niov, kiov, iov, offset, mlen, rlen);
+ niov, kiov, offset, mlen, rlen);
/* we need to lock here as recv can be called from any context */
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
/* someone closed the conn after we copied this out, nuke it */
kgnilnd_consume_rx(rx);
- lnet_finalize(ni, lntmsg, conn->gnc_error);
+ lnet_finalize(lntmsg, conn->gnc_error);
RETURN(0);
}
read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
switch (rxmsg->gnm_type) {
default:
GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
- " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
- conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
+ " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
+ conn, rx, rxmsg, lntmsg, niov, kiov, offset, mlen, rlen);
LBUG();
case GNILND_MSG_IMMEDIATE:
}
}
- if (kiov != NULL)
- lnet_copy_flat2kiov(
- niov, kiov, offset,
- *kgnilnd_tunables.kgn_max_immediate,
- &rxmsg[1], 0, mlen);
- else
- lnet_copy_flat2iov(
- niov, iov, offset,
- *kgnilnd_tunables.kgn_max_immediate,
- &rxmsg[1], 0, mlen);
+ lnet_copy_flat2kiov(
+ niov, kiov, offset,
+ *kgnilnd_tunables.kgn_max_immediate,
+ &rxmsg[1], 0, mlen);
kgnilnd_consume_rx(rx);
- lnet_finalize(ni, lntmsg, 0);
+ lnet_finalize(lntmsg, 0);
RETURN(0);
case GNILND_MSG_PUT_REQ:
/* LNET wants to truncate or drop transaction, sending NAK */
if (mlen == 0) {
kgnilnd_consume_rx(rx);
- lnet_finalize(ni, lntmsg, 0);
+ lnet_finalize(lntmsg, 0);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
GOTO(nak_put_req, rc);
}
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
+ kiov, offset, mlen);
if (rc != 0) {
GOTO(nak_put_req, rc);
}
/* LNET wants to truncate or drop transaction, sending NAK */
if (mlen == 0) {
kgnilnd_consume_rx(rx);
- lnet_finalize(ni, lntmsg, 0);
+ lnet_finalize(lntmsg, 0);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
if (rc != 0)
GOTO(nak_get_req_rev, rc);
-
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
+ kiov, offset, mlen);
if (rc != 0)
GOTO(nak_get_req_rev, rc);
-
tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
rxmsg->gnm_u.putreq.gnprm_cookie;
tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
/* LNET wants to truncate or drop transaction, sending NAK */
if (mlen == 0) {
kgnilnd_consume_rx(rx);
- lnet_finalize(ni, lntmsg, 0);
+ lnet_finalize(lntmsg, 0);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
if (time_after_eq(now, newest_last_rx + timeout)) {
uint32_t level = D_CONSOLE|D_NETERROR;
- if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+ if (conn->gnc_peer->gnp_state == GNILND_PEER_DOWN) {
level = D_NET;
}
GNIDBG_CONN(level, conn,
int rc = 0;
int count = 0;
int reconnect;
+ int to_reconn;
short releaseconn = 0;
unsigned long first_rx = 0;
+ int purgatory_conn_cnt = 0;
CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
peer, libcfs_nid2str(peer->gnp_nid),
peer->gnp_reconnect_interval);
- timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout,
+ timeout = cfs_time_seconds(max(*kgnilnd_tunables.kgn_timeout,
GNILND_MIN_TIMEOUT));
conn = kgnilnd_find_conn_locked(peer);
conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
conn->gnc_peer_error = -ETIMEDOUT;
}
+
+ if (*kgnilnd_tunables.kgn_to_reconn_disable &&
+ rc == -ETIMEDOUT) {
+ peer->gnp_state = GNILND_PEER_TIMED_OUT;
+ CDEBUG(D_WARNING, "%s conn timed out, will "
+ "reconnect upon request from peer\n",
+ libcfs_nid2str(conn->gnc_peer->gnp_nid));
+ }
/* Once we mark closed, any of the scheduler threads could
* get it and move through before we hit the fail loc code */
kgnilnd_close_conn_locked(conn, rc);
/* Don't reconnect if we are still trying to clear out old conns.
* This prevents us sending traffic on the new mbox before ensuring we are done
* with the old one */
- reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
+ reconnect = (peer->gnp_state == GNILND_PEER_UP) &&
(atomic_read(&peer->gnp_dirty_eps) == 0);
+ /* fast reconnect after a timeout */
+ to_reconn = !conn &&
+ (peer->gnp_last_errno == -ETIMEDOUT) &&
+ *kgnilnd_tunables.kgn_fast_reconn;
+
/* if we are not connected and there are tx on the gnp_tx_queue waiting
* to be sent, we'll check the reconnect interval and fire up a new
* connection request */
- if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
+ if (reconnect &&
+ (peer->gnp_connecting == GNILND_PEER_IDLE) &&
(time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
- !list_empty(&peer->gnp_tx_queue) && reconnect) {
+ (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
CDEBUG(D_NET, "starting connect to %s\n",
libcfs_nid2str(peer->gnp_nid));
- LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we"
- "have a write_lock, state issue %d\n", peer->gnp_connecting);
+ LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE,
+ "Peer was idle and we have a write_lock, state issue %d\n",
+ peer->gnp_connecting);
peer->gnp_connecting = GNILND_PEER_CONNECT;
kgnilnd_peer_addref(peer); /* extra ref for connd */
cfs_duration_sec(waiting));
kgnilnd_detach_purgatory_locked(conn, souls);
+ } else {
+ purgatory_conn_cnt++;
}
}
}
- return;
+ /* If we have too many connections in purgatory we could run out of
+ * resources. Limit the number of connections to a tunable number,
+ * clean up to the minimum all in one fell swoop... there are
+ * situations where dvs will retry tx's and we can eat up several
+ * hundred connection requests at once.
+ */
+ if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
+ list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
+ gnc_list) {
+ if (conn->gnc_in_purgatory &&
+ conn->gnc_state == GNILND_CONN_DONE) {
+ CDEBUG(D_NET, "Dropping Held resource due to"
+ " resource limits being hit\n");
+ kgnilnd_detach_purgatory_locked(conn, souls);
+
+ if (purgatory_conn_cnt-- <
+ *kgnilnd_tunables.kgn_max_purgatory)
+ break;
+ }
+ }
+ }
}
void
{
struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
struct list_head *ctmp, *ctmpN;
- struct list_head geriatrics;
- struct list_head souls;
-
- INIT_LIST_HEAD(&geriatrics);
- INIT_LIST_HEAD(&souls);
+ LIST_HEAD(geriatrics);
+ LIST_HEAD(souls);
write_lock(&kgnilnd_data.kgn_peer_conn_lock);
struct timer_list timer;
DEFINE_WAIT(wait);
- cfs_block_allsigs();
-
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_nice);
spin_lock(&kgnilnd_data.kgn_reaper_lock);
next_check_time);
mod_timer(&timer, (long) jiffies + timeout);
- /* check flag variables before comitting */
+ /* check flag variables before committing */
if (!kgnilnd_data.kgn_shutdown &&
!kgnilnd_data.kgn_quiesce_trigger) {
CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
int
kgnilnd_recv_bte_get(kgn_tx_t *tx)
{
unsigned niov, offset, nob;
- lnet_kiov_t *kiov;
- lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
+ struct bio_vec *kiov;
+ struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
if (kiov != NULL) {
}
if (rrc == GNI_RC_NOT_DONE) {
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
dev->gnd_id, num_processed);
return num_processed;
"this is bad, somehow our credits didn't protect us"
" from CQ overrun\n");
LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
- "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
+ "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
event_data, GNI_CQ_GET_TYPE(event_data));
rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
&desc);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
/* XXX Nic: Need better error handling here... */
LASSERTF((rrc == GNI_RC_SUCCESS) ||
}
/* remove from rdmaq */
+ kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
spin_lock(&conn->gnc_list_lock);
kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
spin_unlock(&conn->gnc_list_lock);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+
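+ /* fault injection: force a non-zero status into event_data so the
+ * RDMA error path below gets exercised */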
+ if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
+ event_data = 1LL << 48;
+ rc = 1;
+ }
if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
atomic_inc(&dev->gnd_rdma_ntx);
/* drop ref from kgnilnd_validate_tx_ev_id */
kgnilnd_admin_decref(conn->gnc_tx_in_use);
kgnilnd_conn_decref(conn);
+
continue;
}
-EFAULT,
rcookie,
tx->tx_msg.gnm_srcnid);
- kgnilnd_tx_done(tx, -EFAULT);
+ kgnilnd_tx_done(tx, -GNILND_NOPURG);
kgnilnd_close_conn(conn, -ECOMM);
}
kgn_conn_t *conn = NULL;
int queued_fma, saw_reply, rc;
long num_processed = 0;
+ struct list_head *ctmp, *ctmpN;
for (;;) {
/* make sure we don't keep looping if we need to reset */
}
rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
if (rrc == GNI_RC_NOT_DONE) {
CDEBUG(D_INFO,
- "SMSG send CQ %d not ready (data "LPX64") "
+ "SMSG send CQ %d not ready (data %#llx) "
"processed %ld\n", dev->gnd_id, event_data,
num_processed);
+
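+ /* the CQ events we just drained may have freed SMSG credits;
+ * give any credit-starved conns on the delay list another shot
+ * at the ready queue */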
+ if (num_processed > 0) {
+ spin_lock(&dev->gnd_lock);
+ if (!list_empty(&dev->gnd_delay_conns)) {
+ list_for_each_safe(ctmp, ctmpN, &dev->gnd_delay_conns) {
+ conn = list_entry(ctmp, kgn_conn_t, gnc_delaylist);
+ list_del_init(&conn->gnc_delaylist);
+ CDEBUG(D_NET, "Moving Conn %p from delay queue to ready_queue\n", conn);
+ kgnilnd_schedule_conn_nolock(conn);
+ }
+ spin_unlock(&dev->gnd_lock);
+ kgnilnd_schedule_device(dev);
+ } else {
+ spin_unlock(&dev->gnd_lock);
+ }
+ }
return num_processed;
}
"this is bad, somehow our credits didn't "
"protect us from CQ overrun\n");
LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
- "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
+ "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
event_data, GNI_CQ_GET_TYPE(event_data));
/* if SMSG couldn't handle an error, time for conn to die */
if (conn == NULL) {
/* Conn was destroyed? */
CDEBUG(D_NET,
- "SMSG CQID lookup "LPX64" failed\n",
+ "SMSG CQID lookup %#llx failed\n",
GNI_CQ_GET_INST_ID(event_data));
write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
continue;
}
/* lock tx_list_state and tx_state */
+ kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
spin_lock(&tx->tx_conn->gnc_list_lock);
GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
spin_unlock(&tx->tx_conn->gnc_list_lock);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
if (queued_fma) {
CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
return 1;
}
rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
if (rrc == GNI_RC_NOT_DONE) {
- CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
+ CDEBUG(D_INFO, "SMSG RX CQ %d empty data %#llx "
"processed %ld\n",
dev->gnd_id, event_data, num_processed);
return num_processed;
/* set overrun too */
event_data |= (1UL << 63);
LASSERTF(GNI_CQ_OVERRUN(event_data),
- "(1UL << 63) is no longer the bit to"
- "set to indicate CQ_OVERRUN\n");
+ "(1UL << 63) is no longer the bit to set to indicate CQ_OVERRUN\n");
}
}
/* sender should get error event too and take care
of failed transaction by re-transmitting */
if (rrc == GNI_RC_TRANSACTION_ERROR) {
- CDEBUG(D_NET, "SMSG RX CQ error "LPX64"\n", event_data);
+ CDEBUG(D_NET, "SMSG RX CQ error %#llx\n", event_data);
continue;
}
conn = kgnilnd_cqid2conn_locked(
GNI_CQ_GET_INST_ID(event_data));
if (conn == NULL) {
- CDEBUG(D_NET, "SMSG RX CQID lookup "LPU64" "
- "failed, dropping event "LPX64"\n",
+ CDEBUG(D_NET, "SMSG RX CQID lookup %llu "
+ "failed, dropping event %#llx\n",
GNI_CQ_GET_INST_ID(event_data),
event_data);
} else {
- CDEBUG(D_NET, "SMSG RX: CQID "LPU64" "
+ CDEBUG(D_NET, "SMSG RX: CQID %llu "
"conn %p->%s\n",
GNI_CQ_GET_INST_ID(event_data),
conn, conn->gnc_peer ?
rc = kgnilnd_map_buffer(tx);
}
- /* rc should be 0 if we mapped succesfully here, if non-zero we are queueing */
+ /* rc should be 0 if we mapped successfully here, if non-zero
+ * we are queueing */
if (rc != 0) {
/* if try_map_if_full set, they handle requeuing */
if (unlikely(try_map_if_full)) {
* remote node where the RDMA will be started
* Special case -EAGAIN logic - this should just be queued as if the mapping couldn't
* be satisfied. The rest of the errors are "hard" errors that require
- * upper layers to handle themselves */
+ * upper layers to handle themselves.
+ * If kgnilnd_post_rdma returns a resource error, kgnilnd_rdma will put
+ * the tx back on the TX_MAPQ. When this tx is pulled back off the MAPQ,
+ * its gnm_type will now be GNILND_MSG_PUT_DONE or
+ * GNILND_MSG_GET_DONE_REV.
+ */
case GNILND_MSG_GET_REQ:
tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
break;
/* PUT_REQ and GET_DONE are where we do the actual RDMA */
+ case GNILND_MSG_PUT_DONE:
case GNILND_MSG_PUT_REQ:
- kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
&tx->tx_putinfo.gnpam_desc,
tx->tx_putinfo.gnpam_desc.gnrd_nob,
tx->tx_putinfo.gnpam_dst_cookie);
+ RETURN(try_map_if_full ? rc : 0);
break;
case GNILND_MSG_GET_DONE:
- kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
&tx->tx_getinfo.gngm_desc,
tx->tx_lntmsg[0]->msg_len,
tx->tx_getinfo.gngm_cookie);
-
+ RETURN(try_map_if_full ? rc : 0);
break;
case GNILND_MSG_PUT_REQ_REV:
tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
break;
case GNILND_MSG_PUT_DONE_REV:
- kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
&tx->tx_getinfo.gngm_desc,
tx->tx_nob,
tx->tx_getinfo.gngm_cookie);
+ RETURN(try_map_if_full ? rc : 0);
break;
case GNILND_MSG_GET_ACK_REV:
tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
/* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
break;
+ case GNILND_MSG_GET_DONE_REV:
case GNILND_MSG_GET_REQ_REV:
- kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
&tx->tx_putinfo.gnpam_desc,
tx->tx_putinfo.gnpam_desc.gnrd_nob,
tx->tx_putinfo.gnpam_dst_cookie);
-
+ RETURN(try_map_if_full ? rc : 0);
break;
}
GNITX_ASSERTF(tx, tx->tx_id.txe_smsg_id != 0,
"tx with zero id", NULL);
- CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie "LPX64"\n",
+ CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie %#llx\n",
tx, kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
tx->tx_msg.gnm_type, tx->tx_id.txe_cookie);
GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) &&
(tx->tx_id.txe_cookie == cookie)),
"conn 0x%p->%s tx_ref_table hosed: wanted "
- "txe_cookie "LPX64" txe_idx %d "
- "found tx %p cookie "LPX64" txe_idx %d\n",
+ "txe_cookie %#llx txe_idx %d "
+ "found tx %p cookie %#llx txe_idx %d\n",
conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
cookie, ev_id.txe_idx,
tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx);
tx->tx_state, GNILND_TX_WAITING_REPLY,
libcfs_nid2str(conn->gnc_peer->gnp_nid));
} else {
- CWARN("Unmatched reply %02x, or %02x/"LPX64" from %s\n",
+ CWARN("Unmatched reply %02x, or %02x/%#llx from %s\n",
type1, type2, cookie, libcfs_nid2str(conn->gnc_peer->gnp_nid));
}
return tx;
tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
if (rc == -EFAULT) {
- CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16"LPF64"u physnop %8d buffertype %#8x MemHandle "LPX64"."LPX64"x\n",
+ CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16llu physnop %8d buffertype %#8x MemHandle %#llx.%#llxx\n",
rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
if(*kgnilnd_tunables.kgn_efault_lbug) {
RETURN_EXIT;
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times.
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
libcfs_nid2str(conn->gnc_peer->gnp_nid),
cfs_duration_sec(timestamp - newest_last_rx),
cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
rc = -ETIME;
kgnilnd_close_conn(conn, rc);
RETURN_EXIT;
rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
if (rrc == GNI_RC_NOT_DONE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
- CDEBUG(D_INFO, "SMSG RX empty\n");
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
RETURN_EXIT;
}
*/
if (rrc == GNI_RC_INVALID_STATE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
"detected closing conn %p from peer %s\n", conn,
libcfs_nid2str(conn->gnc_peer->gnp_nid));
rx = kgnilnd_alloc_rx();
if (rx == NULL) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_release_msg(conn);
GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
conn, libcfs_nid2str(peer->gnp_nid));
RETURN_EXIT;
}
- GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p from %s",
- conn, libcfs_nid2str(peer->gnp_nid));
+ GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
timestamp = conn->gnc_last_rx;
- last_seq = conn->gnc_rx_seq;
+ seq = last_seq = atomic_read(&conn->gnc_rx_seq);
+ atomic_inc(&conn->gnc_rx_seq);
conn->gnc_last_rx = jiffies;
/* stash first rx so we can clear out purgatory
if (conn->gnc_first_rx == 0)
conn->gnc_first_rx = jiffies;
- seq = conn->gnc_rx_seq++;
-
/* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_peer_alive(conn->gnc_peer);
rx->grx_msg = msg;
}
if (msg->gnm_connstamp != conn->gnc_peer_connstamp) {
- GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp "LPX64"("LPX64
+ GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp %#llx(%#llx"
" expected) from %s",
msg->gnm_connstamp, conn->gnc_peer_connstamp,
libcfs_nid2str(peer->gnp_nid));
conn, last_seq,
cfs_duration_sec(now - timestamp),
cfs_duration_sec(now - conn->gnc_last_rx_cq),
- conn->gnc_tx_seq,
+ atomic_read(&conn->gnc_tx_seq),
cfs_duration_sec(now - conn->gnc_last_tx),
cfs_duration_sec(now - conn->gnc_last_tx_cq),
cfs_duration_sec(now - conn->gnc_last_noop_want),
* mapped so we can reset our timers */
dev->gnd_map_attempt = 0;
continue;
+ } else if (rc == -EAGAIN) {
+ spin_lock(&dev->gnd_lock);
+ mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
+ spin_unlock(&dev->gnd_lock);
+ GOTO(get_out_mapped, rc);
} else if (rc != -ENOMEM) {
/* carp, failure we can't handle */
kgnilnd_tx_done(tx, rc);
} else {
GNIDBG_TX(log_retrans_level, tx,
"transient map failure #%d %d pages/%d bytes phys %u@%u "
- "virt %u@"LPU64" "
+ "virt %u@%llu "
"nq_map %d mdd# %d/%d GART %ld",
dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
list_del_init(&conn->gnc_schedlist);
+ /*
+ * Since we are processing this conn now, it no longer needs to be
+ * on the delaylist.
+ */
+
+ if (!list_empty(&conn->gnc_delaylist))
+ list_del_init(&conn->gnc_delaylist);
spin_unlock(&dev->gnd_lock);
conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_PROCESS);
kgnilnd_conn_decref(conn);
up_write(&dev->gnd_conn_sem);
} else if (rc != 1) {
- kgnilnd_conn_decref(conn);
+ kgnilnd_conn_decref(conn);
}
/* clear this so that scheduler thread doesn't spin */
found_work = 0;
* yet. Cycle this conn back through
* the scheduler. */
kgnilnd_schedule_conn(conn);
- } else
- kgnilnd_complete_closed_conn(conn);
-
+ } else {
+ kgnilnd_complete_closed_conn(conn);
+ }
up_write(&dev->gnd_conn_sem);
} else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
/* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
kgnilnd_conn_decref(conn);
up_write(&dev->gnd_conn_sem);
} else if (rc != 1) {
- kgnilnd_conn_decref(conn);
+ kgnilnd_conn_decref(conn);
}
/* check list again with lock held */
dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
- cfs_block_allsigs();
-
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_sched_nice);
deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);