Whamcloud - gitweb
LU-13004 gnilnd: remove support for GNILND_BUF_VIRT_*
[fs/lustre-release.git] / lnet / klnds / gnilnd / gnilnd_cb.c
index 36127bc..42b1cd3 100644 (file)
@@ -301,8 +301,8 @@ kgnilnd_alloc_tx (void)
 #define _kgnilnd_cksum(seed, ptr, nob)  csum_partial(ptr, nob, seed)
 
 /* we don't use offset as every one is passing a buffer reference that already
- * includes the offset into the base address -
- *  see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
+ * includes the offset into the base address.
+ */
 static inline __u16
 kgnilnd_cksum(void *ptr, size_t nob)
 {
@@ -511,9 +511,9 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni
        kgnilnd_queue_tx(conn, tx);
 }
 
-int
+static int
 kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
-                              struct kvec *iov, struct bio_vec *kiov,
+                              struct bio_vec *kiov,
                               unsigned int offset, unsigned int nob)
 {
        kgn_msg_t       *msg = &tx->tx_msg;
@@ -526,7 +526,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
 
        if (nob == 0) {
                tx->tx_buffer = NULL;
-       } else if (kiov != NULL) {
+       } else {
 
                if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
                        niov = round_up(nob + offset + kiov->bv_offset,
@@ -534,8 +534,8 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
                }
 
                LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
-                       "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
-                       niov, msg, kiov, iov, offset, nob);
+                       "bad niov %d msg %p kiov %p offset %d nob %d\n",
+                       niov, msg, kiov, offset, nob);
 
                while (offset >= kiov->bv_len) {
                        offset -= kiov->bv_len;
@@ -591,29 +591,6 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
                tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
                tx->tx_nob = nob;
 
-       } else {
-               /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
-                * could "flatten" the payload into a single contiguous buffer ready
-                * for sending direct over an FMA if we ever needed to. */
-
-               LASSERT(niov > 0);
-
-               while (offset >= iov->iov_len) {
-                       offset -= iov->iov_len;
-                       niov--;
-                       iov++;
-                       LASSERT(niov > 0);
-               }
-
-               if (nob > iov->iov_len - offset) {
-                       CERROR("Can't handle multiple vaddr fragments\n");
-                       return -EMSGSIZE;
-               }
-
-               tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
-
-               tx->tx_buftype = GNILND_BUF_IMMEDIATE;
-               tx->tx_nob = nob;
        }
 
        /* checksum payload early - it shouldn't be changing after lnd_send */
@@ -634,34 +611,6 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
 }
 
 int
-kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
-                         unsigned int niov, struct kvec *iov,
-                         unsigned int offset, unsigned int nob)
-
-{
-       LASSERT(nob > 0);
-       LASSERT(niov > 0);
-       LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
-
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               niov--;
-               iov++;
-               LASSERT(niov > 0);
-       }
-
-       if (nob > iov->iov_len - offset) {
-               CERROR("Can't handle multiple vaddr fragments\n");
-               return -EMSGSIZE;
-       }
-
-       tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
-       tx->tx_nob = nob;
-       tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
-       return 0;
-}
-
-int
 kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov,
                          unsigned int offset, unsigned int nob)
 {
@@ -776,21 +725,10 @@ error:
 
 static inline int
 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
-                         struct kvec *iov, struct bio_vec *kiov,
+                         struct bio_vec *kiov,
                          unsigned int offset, unsigned int nob)
 {
-       int     rc;
-
-       LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
-                                               " offset %d, nob %d, niov %d\n"
-                                               , iov, kiov, tx, offset, nob, niov);
-
-       if (kiov != NULL) {
-               rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
-       } else {
-               rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
-       }
-       return rc;
+       return kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
 }
 
 /* kgnilnd_parse_lnet_rdma()
@@ -966,12 +904,6 @@ kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
                dev->gnd_map_nphys++;
                dev->gnd_map_physnop += tx->tx_phys_npages;
                break;
-
-       case GNILND_BUF_VIRT_MAPPED:
-               bytes = tx->tx_nob;
-               dev->gnd_map_nvirt++;
-               dev->gnd_map_virtnob += tx->tx_nob;
-               break;
        }
 
        if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
@@ -1015,12 +947,6 @@ kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
                dev->gnd_map_nphys--;
                dev->gnd_map_physnop -= tx->tx_phys_npages;
                break;
-
-       case GNILND_BUF_VIRT_UNMAPPED:
-               bytes = tx->tx_nob;
-               dev->gnd_map_nvirt--;
-               dev->gnd_map_virtnob -= tx->tx_nob;
-               break;
        }
 
        if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
@@ -1073,7 +999,6 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
        case GNILND_BUF_IMMEDIATE:
        case GNILND_BUF_IMMEDIATE_KIOV:
        case GNILND_BUF_PHYS_MAPPED:
-       case GNILND_BUF_VIRT_MAPPED:
                return 0;
 
        case GNILND_BUF_PHYS_UNMAPPED:
@@ -1086,41 +1011,16 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
                 * - this needs to turn into a non-fatal error soon to allow
                 *  GART resource, etc starvation handling */
                if (rrc != GNI_RC_SUCCESS) {
-                       GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
-                               "phys %u pp %u, virt %u nob %llu",
+                       GNIDBG_TX(D_NET, tx,
+                                 "Can't map %d pages: dev %d phys %u pp %u",
                                tx->tx_phys_npages, dev->gnd_id,
-                               dev->gnd_map_nphys, dev->gnd_map_physnop,
-                               dev->gnd_map_nvirt, dev->gnd_map_virtnob);
+                               dev->gnd_map_nphys, dev->gnd_map_physnop);
                        RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
                }
 
                tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
                kgnilnd_mem_add_map_list(dev, tx);
                return 0;
-
-       case GNILND_BUF_VIRT_UNMAPPED:
-               rrc = kgnilnd_mem_register(dev->gnd_handle,
-                       (__u64)tx->tx_buffer, tx->tx_nob,
-                       NULL, flags, &tx->tx_map_key);
-               if (rrc != GNI_RC_SUCCESS) {
-                       GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
-                               "phys %u pp %u, virt %u nob %llu",
-                               tx->tx_nob, dev->gnd_id,
-                               dev->gnd_map_nphys, dev->gnd_map_physnop,
-                               dev->gnd_map_nvirt, dev->gnd_map_virtnob);
-                       RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
-               }
-
-               tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
-               kgnilnd_mem_add_map_list(dev, tx);
-               if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
-                   tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
-                       atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
-                       GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
-                              tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
-               }
-
-               return 0;
        }
 }
 
@@ -1164,8 +1064,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
        /* code below relies on +1 relationship ... */
        BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
                     (GNILND_BUF_PHYS_UNMAPPED + 1));
-       BUILD_BUG_ON(GNILND_BUF_VIRT_MAPPED !=
-                    (GNILND_BUF_VIRT_UNMAPPED + 1));
 
        switch (tx->tx_buftype) {
        default:
@@ -1174,7 +1072,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
        case GNILND_BUF_NONE:
        case GNILND_BUF_IMMEDIATE:
        case GNILND_BUF_PHYS_UNMAPPED:
-       case GNILND_BUF_VIRT_UNMAPPED:
                break;
        case GNILND_BUF_IMMEDIATE_KIOV:
                if (tx->tx_phys != NULL) {
@@ -1188,7 +1085,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                break;
 
        case GNILND_BUF_PHYS_MAPPED:
-       case GNILND_BUF_VIRT_MAPPED:
                LASSERT(tx->tx_conn != NULL);
 
                dev = tx->tx_conn->gnc_device;
@@ -2180,7 +2076,7 @@ kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
                        goto out;
                }
                rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
-                                              NULL, lntmsg->msg_md->md_kiov,
+                                              lntmsg->msg_md->md_kiov,
                                               0, lntmsg->msg_md->md_length);
                if (rc != 0) {
                        CERROR("unable to setup buffer: %d\n", rc);
@@ -2224,7 +2120,7 @@ kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
                        goto out;
                }
 
-               rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+               rc = kgnilnd_setup_rdma_buffer(tx, niov,
                                               kiov, offset, nob);
                if (rc != 0) {
                        kgnilnd_tx_done(tx, rc);
@@ -2307,7 +2203,7 @@ kgnilnd_setup_rdma(struct lnet_ni *ni, kgn_rx_t *rx, struct lnet_msg *lntmsg, in
        if (rc != 0)
                goto failed_1;
 
-       rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL, kiov, offset, nob);
+       rc = kgnilnd_setup_rdma_buffer(tx, niov, kiov, offset, nob);
        if (rc != 0)
                goto failed_1;
 
@@ -2513,16 +2409,10 @@ kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
                        }
                }
 
-               if (kiov != NULL)
-                       lnet_copy_flat2kiov(
-                               niov, kiov, offset,
-                               *kgnilnd_tunables.kgn_max_immediate,
-                               &rxmsg[1], 0, mlen);
-               else
-                       lnet_copy_flat2iov(
-                               niov, NULL, offset,
-                               *kgnilnd_tunables.kgn_max_immediate,
-                               &rxmsg[1], 0, mlen);
+               lnet_copy_flat2kiov(
+                       niov, kiov, offset,
+                       *kgnilnd_tunables.kgn_max_immediate,
+                       &rxmsg[1], 0, mlen);
 
                kgnilnd_consume_rx(rx);
                lnet_finalize(lntmsg, 0);
@@ -2554,7 +2444,7 @@ kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
                        GOTO(nak_put_req, rc);
                }
 
-               rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+               rc = kgnilnd_setup_rdma_buffer(tx, niov,
                                               kiov, offset, mlen);
                if (rc != 0) {
                        GOTO(nak_put_req, rc);
@@ -2615,13 +2505,11 @@ nak_put_req:
                        if (rc != 0)
                                GOTO(nak_get_req_rev, rc);
 
-
-                       rc = kgnilnd_setup_rdma_buffer(tx, niov, NULL,
+                       rc = kgnilnd_setup_rdma_buffer(tx, niov,
                                                       kiov, offset, mlen);
                        if (rc != 0)
                                GOTO(nak_get_req_rev, rc);
 
-
                        tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
                                rxmsg->gnm_u.putreq.gnprm_cookie;
                        tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
@@ -4476,8 +4364,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_finalize_rx_done(tx, msg);
@@ -4495,8 +4382,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
@@ -4508,8 +4394,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
@@ -4523,8 +4408,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_finalize_rx_done(tx, msg);
@@ -4537,8 +4421,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_finalize_rx_done(tx, msg);
@@ -4550,8 +4433,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
@@ -4854,11 +4736,9 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev)
                } else {
                       GNIDBG_TX(log_retrans_level, tx,
                                "transient map failure #%d %d pages/%d bytes phys %u@%u "
-                               "virt %u@%llu "
                                "nq_map %d mdd# %d/%d GART %ld",
                                dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
                                dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
-                               dev->gnd_map_nvirt, dev->gnd_map_virtnob,
                                atomic_read(&dev->gnd_nq_map),
                                atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
                                atomic64_read(&dev->gnd_nbytes_map));