int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
/* In data movement APIs below, payload buffers are described as a set
- * of 'niov' fragments which are...
- * EITHER
- * in virtual memory (struct kvec *iov != NULL)
- * OR
- * in pages (kernel only: plt_kiov_t *kiov != NULL).
+ * of 'niov' fragments which are in pages.
* The LND may NOT overwrite these fragment descriptors.
* An 'offset' may specify a byte offset within the set of
* fragments to start from.  Completing the receive also gives back a
* receive credit if the LND does flow control. */
int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, unsigned int niov,
- struct kvec *iov, struct bio_vec *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
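
/*
 * Illustrative sketch (not part of the patch): with the kvec option gone,
 * an lnd_recv() implementation sees the payload only as 'niov' bio_vec
 * fragments, starting 'offset' bytes in and spanning 'mlen' bytes.  The
 * helper below shows one way to walk that description; the name
 * demo_lnd_consume_payload() and the memcpy-based consumption are
 * assumptions for the example -- a real LND hands the fragments to its
 * transport, and would kmap() highmem pages instead of page_address().
 */
static void
demo_lnd_consume_payload(unsigned int niov, struct bio_vec *kiov,
			 unsigned int offset, unsigned int mlen,
			 const void *src)
{
	/* skip fragments wholly covered by 'offset' */
	while (offset >= kiov->bv_len) {
		offset -= kiov->bv_len;
		kiov++;
		niov--;
	}

	while (mlen > 0) {
		unsigned int frag = min(kiov->bv_len - offset, mlen);
		void *dst = page_address(kiov->bv_page) +
			    kiov->bv_offset + offset;

		memcpy(dst, src, frag);	/* consume one fragment */
		src += frag;
		mlen -= frag;
		offset = 0;		/* only the first fragment is offset */
		kiov++;
		niov--;		/* remaining fragment count, for symmetry */
	}
}
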
/* lnet_parse() has had to delay processing of this message
int
kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, struct bio_vec *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kgn_rx_t *rx = private;
LASSERT(!in_interrupt());
LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
- /* Either all pages or all vaddrs */
- LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
- kiov, iov);
GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
- " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
+ " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
conn, rxmsg, lntmsg,
- niov, kiov, iov, offset, mlen, rlen);
+ niov, kiov, offset, mlen, rlen);
/* we need to lock here as recv can be called from any context */
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
switch (rxmsg->gnm_type) {
default:
GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
- " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
- conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
+ " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
+ conn, rx, rxmsg, lntmsg, niov, kiov, offset, mlen, rlen);
LBUG();
case GNILND_MSG_IMMEDIATE:
&rxmsg[1], 0, mlen);
- else
- lnet_copy_flat2iov(
- niov, iov, offset,
- *kgnilnd_tunables.kgn_max_immediate,
- &rxmsg[1], 0, mlen);
GOTO(nak_put_req, rc);
}
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+ kiov, offset, mlen);
if (rc != 0) {
GOTO(nak_put_req, rc);
}
GOTO(nak_get_req_rev, rc);
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+ kiov, offset, mlen);
if (rc != 0)
GOTO(nak_get_req_rev, rc);
int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, unsigned int niov, struct kvec *iov,
+ int delayed, unsigned int niov,
struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen);
return -EINVAL;
}
-static int kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx,
- struct kib_rdma_desc *rd, unsigned int niov,
- struct kvec *iov, int offset, int nob)
-{
- struct kib_net *net = ni->ni_data;
- struct page *page;
- struct scatterlist *sg;
- unsigned long vaddr;
- int fragnob;
- int page_offset;
- unsigned int max_niov;
-
- LASSERT (nob > 0);
- LASSERT (niov > 0);
- LASSERT (net != NULL);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT (niov > 0);
- }
-
- max_niov = niov;
-
- sg = tx->tx_frags;
- do {
- LASSERT(niov > 0);
-
- vaddr = ((unsigned long)iov->iov_base) + offset;
- page_offset = vaddr & (PAGE_SIZE - 1);
- page = lnet_kvaddr_to_page(vaddr);
- if (page == NULL) {
- CERROR("Can't find page\n");
- return -EFAULT;
- }
-
- fragnob = min((int)(iov->iov_len - offset), nob);
- fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
-
- /*
- * We're allowed to start at a non-aligned page offset in
- * the first fragment and end at a non-aligned page offset
- * in the last fragment.
- */
- if ((fragnob < (int)PAGE_SIZE - page_offset) &&
- (niov < max_niov) && nob > fragnob) {
- CDEBUG(D_NET, "fragnob %d < available page %d: with"
- " remaining %d iovs with %d nob left\n",
- fragnob, (int)PAGE_SIZE - page_offset, niov,
- nob);
- tx->tx_gaps = true;
- }
-
- sg_set_page(sg, page, fragnob, page_offset);
- sg = sg_next(sg);
- if (!sg) {
- CERROR("lacking enough sg entries to map tx\n");
- return -EFAULT;
- }
-
- if (offset + fragnob < iov->iov_len) {
- offset += fragnob;
- } else {
- offset = 0;
- iov++;
- niov--;
- }
- nob -= fragnob;
- } while (nob > 0);
-
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
-}
-
static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
struct kib_rdma_desc *rd, int nkiov,
struct bio_vec *kiov, int offset, int nob)
int
kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, unsigned int niov, struct kvec *iov, struct bio_vec *kiov,
+ int delayed, unsigned int niov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
struct kib_rx *rx = private;
LASSERT (mlen <= rlen);
LASSERT (!in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
switch (rxmsg->ibm_type) {
default:
break;
}
- if (kiov != NULL)
- lnet_copy_flat2kiov(niov, kiov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
- mlen);
- else
- lnet_copy_flat2iov(niov, iov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
- mlen);
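+ /* immediate message: copy the inline payload out of the receive
+  * buffer straight into the destination pages */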
+ lnet_copy_flat2kiov(niov, kiov, offset,
+ IBLND_MSG_SIZE, rxmsg,
+ offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload),
+ mlen);
lnet_finalize(lntmsg, 0);
break;
txmsg = tx->tx_msg;
rd = &txmsg->ibm_u.putack.ibpam_rd;
- if (kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx, rd,
- niov, iov, offset, mlen);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- niov, kiov, offset, mlen);
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+ niov, kiov, offset, mlen);
if (rc != 0) {
CERROR("Can't setup PUT sink for %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, struct bio_vec *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
int ksocknal_accept(struct lnet_ni *ni, struct socket *sock);
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, unsigned int niov, struct kvec *iov,
+ int delayed, unsigned int niov,
struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
- if (mlen == 0 || iov != NULL) {
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
- conn->ksnc_rx_niov =
- lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
- niov, iov, offset, mlen);
- } else {
- conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
- conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
- conn->ksnc_rx_nkiov =
- lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
- niov, kiov, offset, mlen);
- }
+ if (mlen == 0) {
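+ /* No payload to receive: lnet_extract_iov() returns zero fragments
+  * when mlen == 0, so the NULL source iov below is never dereferenced. */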
+ conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
+ conn->ksnc_rx_niov =
+ lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
+ niov, NULL, offset, mlen);
+ } else {
+ conn->ksnc_rx_niov = 0;
+ conn->ksnc_rx_iov = NULL;
+ conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
+ conn->ksnc_rx_nkiov =
+ lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
+ niov, kiov, offset, mlen);
+ }
LASSERT (mlen ==
lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
}
rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
- niov, NULL, kiov, offset, mlen,
+ niov, kiov, offset, mlen,
rlen);
if (rc < 0)
lnet_finalize(msg, rc);
static int
lolnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, struct bio_vec *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
struct lnet_msg *sendmsg = private;
- if (lntmsg != NULL) { /* not discarding */
- if (iov != NULL)
- lnet_copy_kiov2iov(niov, iov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_kiov,
- sendmsg->msg_offset, mlen);
- else
- lnet_copy_kiov2kiov(niov, kiov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_kiov,
- sendmsg->msg_offset, mlen);
+ if (lntmsg) { /* not discarding */
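+ /* loopback: the payload already sits in the sender's page fragments;
+  * copy it straight into the receiver's kiov */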
+ lnet_copy_kiov2kiov(niov, kiov, offset,
+ sendmsg->msg_niov,
+ sendmsg->msg_kiov,
+ sendmsg->msg_offset, mlen);
lnet_finalize(lntmsg, 0);
}