#define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
/* we don't use offset as everyone is passing a buffer reference that already
- * includes the offset into the base address -
- * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
+ * includes the offset into the base address.
+ */
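+/* e.g. (a sketch with a hypothetical caller; 'base' and 'sum' are not from
+ * this patch): the offset is folded into the pointer before the call, so
+ * this helper never needs an offset argument of its own:
+ *
+ *	__u16 sum = kgnilnd_cksum((char *)base + offset, nob);
+ */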
static inline __u16
kgnilnd_cksum(void *ptr, size_t nob)
{
kgnilnd_queue_tx(conn, tx);
}
-int
+static int
kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
- struct kvec *iov, struct bio_vec *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
kgn_msg_t *msg = &tx->tx_msg;
if (nob == 0) {
tx->tx_buffer = NULL;
- } else if (kiov != NULL) {
+ } else {
if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
niov = round_up(nob + offset + kiov->bv_offset,
		PAGE_SIZE) / PAGE_SIZE;
}
LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
- "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
- niov, msg, kiov, iov, offset, nob);
+ "bad niov %d msg %p kiov %p offset %d nob%d\n",
+ niov, msg, kiov, offset, nob);
while (offset >= kiov->bv_len) {
offset -= kiov->bv_len;
tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
tx->tx_nob = nob;
- } else {
- /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
- * could "flatten" the payload into a single contiguous buffer ready
- * for sending direct over an FMA if we ever needed to. */
-
- LASSERT(niov > 0);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- if (nob > iov->iov_len - offset) {
- CERROR("Can't handle multiple vaddr fragments\n");
- return -EMSGSIZE;
- }
-
- tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
-
- tx->tx_buftype = GNILND_BUF_IMMEDIATE;
- tx->tx_nob = nob;
}
/* checksum payload early - it shouldn't be changing after lnd_send */
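/* e.g. (sketch only; the actual call site is elided here, field name per
 * kgn_msg_t):
 *
 *	msg->gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, tx->tx_nob);
 */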
}
int
-kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
- unsigned int niov, struct kvec *iov,
- unsigned int offset, unsigned int nob)
-
-{
- LASSERT(nob > 0);
- LASSERT(niov > 0);
- LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- if (nob > iov->iov_len - offset) {
- CERROR("Can't handle multiple vaddr fragments\n");
- return -EMSGSIZE;
- }
-
- tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
- tx->tx_nob = nob;
- tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
- return 0;
-}
-
-int
kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
static inline int
kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
- struct kvec *iov, struct bio_vec *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
- int rc;
-
- LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
- " offset %d, nob %d, niov %d\n"
- , iov, kiov, tx, offset, nob, niov);
-
- if (kiov != NULL) {
- rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
- } else {
- rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
- }
- return rc;
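+ /* with kvec support removed from the LND API, every payload arrives as
+ * a bio_vec array, so RDMA setup always takes the physical-page path */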
+ return kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
}
/* kgnilnd_parse_lnet_rdma()
dev->gnd_map_nphys++;
dev->gnd_map_physnop += tx->tx_phys_npages;
break;
-
- case GNILND_BUF_VIRT_MAPPED:
- bytes = tx->tx_nob;
- dev->gnd_map_nvirt++;
- dev->gnd_map_virtnob += tx->tx_nob;
- break;
}
if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
dev->gnd_map_nphys--;
dev->gnd_map_physnop -= tx->tx_phys_npages;
break;
-
- case GNILND_BUF_VIRT_UNMAPPED:
- bytes = tx->tx_nob;
- dev->gnd_map_nvirt--;
- dev->gnd_map_virtnob -= tx->tx_nob;
- break;
}
if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
case GNILND_BUF_IMMEDIATE:
case GNILND_BUF_IMMEDIATE_KIOV:
case GNILND_BUF_PHYS_MAPPED:
- case GNILND_BUF_VIRT_MAPPED:
return 0;
case GNILND_BUF_PHYS_UNMAPPED:
* - this needs to turn into a non-fatal error soon to allow
* GART resource, etc. starvation handling */
if (rrc != GNI_RC_SUCCESS) {
- GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
- "phys %u pp %u, virt %u nob %llu",
+ GNIDBG_TX(D_NET, tx,
+ "Can't map %d pages: dev %d phys %u pp %u",
tx->tx_phys_npages, dev->gnd_id,
- dev->gnd_map_nphys, dev->gnd_map_physnop,
- dev->gnd_map_nvirt, dev->gnd_map_virtnob);
+ dev->gnd_map_nphys, dev->gnd_map_physnop);
RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
}
tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
kgnilnd_mem_add_map_list(dev, tx);
return 0;
-
- case GNILND_BUF_VIRT_UNMAPPED:
- rrc = kgnilnd_mem_register(dev->gnd_handle,
- (__u64)tx->tx_buffer, tx->tx_nob,
- NULL, flags, &tx->tx_map_key);
- if (rrc != GNI_RC_SUCCESS) {
- GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
- "phys %u pp %u, virt %u nob %llu",
- tx->tx_nob, dev->gnd_id,
- dev->gnd_map_nphys, dev->gnd_map_physnop,
- dev->gnd_map_nvirt, dev->gnd_map_virtnob);
- RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
- }
-
- tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
- kgnilnd_mem_add_map_list(dev, tx);
- if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
- tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
- atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
- GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
- tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
- }
-
- return 0;
}
}
/* code below relies on +1 relationship ... */
BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
(GNILND_BUF_PHYS_UNMAPPED + 1));
- BUILD_BUG_ON(GNILND_BUF_VIRT_MAPPED !=
- (GNILND_BUF_VIRT_UNMAPPED + 1));
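/* e.g. (illustration only, not part of this patch): keeping each pair
 * adjacent lets the unmap code below demote the state with plain
 * arithmetic instead of an explicit assignment:
 *
 *	tx->tx_buftype--;	(PHYS_MAPPED -> PHYS_UNMAPPED)
 */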
switch (tx->tx_buftype) {
default:
case GNILND_BUF_NONE:
case GNILND_BUF_IMMEDIATE:
case GNILND_BUF_PHYS_UNMAPPED:
- case GNILND_BUF_VIRT_UNMAPPED:
break;
case GNILND_BUF_IMMEDIATE_KIOV:
if (tx->tx_phys != NULL) {
break;
case GNILND_BUF_PHYS_MAPPED:
- case GNILND_BUF_VIRT_MAPPED:
LASSERT(tx->tx_conn != NULL);
dev = tx->tx_conn->gnc_device;
goto out;
}
rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
- NULL, lntmsg->msg_md->md_kiov,
+ lntmsg->msg_md->md_kiov,
0, lntmsg->msg_md->md_length);
if (rc != 0) {
CERROR("unable to setup buffer: %d\n", rc);
goto out;
}
- rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
kiov, offset, nob);
if (rc != 0) {
kgnilnd_tx_done(tx, rc);
if (rc != 0)
goto failed_1;
- rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL, kiov, offset, nob);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov, kiov, offset, nob);
if (rc != 0)
goto failed_1;
}
}
- if (kiov != NULL)
- lnet_copy_flat2kiov(
- niov, kiov, offset,
- *kgnilnd_tunables.kgn_max_immediate,
- &rxmsg[1], 0, mlen);
- else
- lnet_copy_flat2iov(
- niov, NULL, offset,
- *kgnilnd_tunables.kgn_max_immediate,
- &rxmsg[1], 0, mlen);
+ lnet_copy_flat2kiov(
+ niov, kiov, offset,
+ *kgnilnd_tunables.kgn_max_immediate,
+ &rxmsg[1], 0, mlen);
kgnilnd_consume_rx(rx);
lnet_finalize(lntmsg, 0);
GOTO(nak_put_req, rc);
}
- rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
kiov, offset, mlen);
if (rc != 0) {
GOTO(nak_put_req, rc);
if (rc != 0)
GOTO(nak_get_req_rev, rc);
-
- rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
kiov, offset, mlen);
if (rc != 0)
GOTO(nak_get_req_rev, rc);
-
tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
rxmsg->gnm_u.putreq.gnprm_cookie;
tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_finalize_rx_done(tx, msg);
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_finalize_rx_done(tx, msg);
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_finalize_rx_done(tx, msg);
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
} else {
GNIDBG_TX(log_retrans_level, tx,
"transient map failure #%d %d pages/%d bytes phys %u@%u "
- "virt %u@%llu "
"nq_map %d mdd# %d/%d GART %ld",
dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
- dev->gnd_map_nvirt, dev->gnd_map_virtnob,
atomic_read(&dev->gnd_nq_map),
atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
atomic64_read(&dev->gnd_nbytes_map));