From: Mr NeilBrown
Date: Thu, 21 Nov 2019 05:53:57 +0000 (+1100)
Subject: LU-13004 modules: replace lnet_kiov_t with struct bio_vec
X-Git-Tag: 2.13.53~45
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=7a74d382d5e8867785f662aede54a3e399168325

LU-13004 modules: replace lnet_kiov_t with struct bio_vec

lnet_kiov_t has the same structure and general purpose as struct bio_vec -
only the names are different. The difference brings no value - so let's
remove it.

sed -e 's/lnet_kiov_t/struct bio_vec/g' -e 's/kiov_page/bv_page/g' -e 's/kiov_len/bv_len/g' -e 's/kiov_offset/bv_offset/g'

plus some cleaning up.

Test-Parameters: trivial
Signed-off-by: Mr NeilBrown
Change-Id: Ia718e5880d0fbbb9aa9aed0edf48a2b2c42b54d3
Reviewed-on: https://review.whamcloud.com/37852
Reviewed-by: James Simmons
Reviewed-by: Shaun Tancheff
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---

diff --git a/libcfs/include/libcfs/linux/linux-misc.h b/libcfs/include/libcfs/linux/linux-misc.h index ab1e2ff..87bfda1 100644 --- a/libcfs/include/libcfs/linux/linux-misc.h +++ b/libcfs/include/libcfs/linux/linux-misc.h @@ -34,6 +34,10 @@ #define __LIBCFS_LINUX_MISC_H__ #include +/* Since Commit 2f8b544477e6 ("block,fs: untangle fs.h and blk_types.h") * fs.h doesn't include blk_types.h, but we need it. */ +#include <linux/blk_types.h> #include #include #include diff --git a/lnet/include/lnet/lib-lnet.h b/lnet/include/lnet/lib-lnet.h index 031f9af..3a4e042 100644 --- a/lnet/include/lnet/lib-lnet.h +++ b/lnet/include/lnet/lib-lnet.h @@ -699,10 +699,10 @@ int lnet_extract_iov(int dst_niov, struct kvec *dst, int src_niov, struct kvec *src, unsigned int offset, unsigned int len); -unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov); -int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, - int src_niov, lnet_kiov_t *src, - unsigned int offset, unsigned int len); +unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov); +int lnet_extract_kiov(int dst_niov, struct bio_vec *dst, + int src_niov, struct bio_vec *src, + unsigned int offset, unsigned int len); void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, @@ -710,15 +710,15 @@ void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int soffset, unsigned int nob); void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, - unsigned int nkiov, lnet_kiov_t *kiov, + unsigned int nkiov, struct bio_vec *kiov, unsigned int kiovoffset, unsigned int nob); -void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, +void lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov, unsigned int kiovoffset, unsigned int niov, struct kvec *iov, unsigned int iovoffset, unsigned int nob); -void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov, +void lnet_copy_kiov2kiov(unsigned int ndkiov, struct bio_vec *dkiov, unsigned int doffset, - unsigned int nskiov, lnet_kiov_t *skiov, + unsigned int nskiov, struct bio_vec *skiov, unsigned int soffset, unsigned int nob); static inline void @@ -734,7 +734,7 @@ lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset, static inline void lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset, - unsigned int nsiov, lnet_kiov_t *skiov, + unsigned int nsiov, struct bio_vec *skiov, unsigned int soffset, unsigned int nob) { struct kvec diov = { .iov_base = dest, .iov_len = dlen }; @@ -754,7 +754,7 @@ lnet_copy_flat2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, } static inline void 
-lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov, +lnet_copy_flat2kiov(unsigned int ndiov, struct bio_vec *dkiov, unsigned int doffset, int slen, void *src, unsigned int soffset, unsigned int nob) { diff --git a/lnet/include/lnet/lib-types.h b/lnet/include/lnet/lib-types.h index ad80165..ee2dc20 100644 --- a/lnet/include/lnet/lib-types.h +++ b/lnet/include/lnet/lib-types.h @@ -168,7 +168,7 @@ struct lnet_msg { unsigned int msg_offset; unsigned int msg_niov; struct kvec *msg_iov; - lnet_kiov_t *msg_kiov; + struct bio_vec *msg_kiov; struct lnet_event msg_ev; struct lnet_hdr msg_hdr; @@ -218,7 +218,7 @@ struct lnet_libmd { struct lnet_handle_md md_bulk_handle; union { struct kvec iov[LNET_MAX_IOV]; - lnet_kiov_t kiov[LNET_MAX_IOV]; + struct bio_vec kiov[LNET_MAX_IOV]; } md_iov; }; @@ -276,7 +276,7 @@ struct lnet_lnd { * credit if the LND does flow control. */ int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg, int delayed, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen); /* lnet_parse() has had to delay processing of this message @@ -857,7 +857,7 @@ struct lnet_rtrbufpool { struct lnet_rtrbuf { struct list_head rb_list; /* chain on rbp_bufs */ struct lnet_rtrbufpool *rb_pool; /* owning pool */ - lnet_kiov_t rb_kiov[0]; /* the buffer space */ + struct bio_vec rb_kiov[0]; /* the buffer space */ }; #define LNET_PEER_HASHSIZE 503 /* prime! */ diff --git a/lnet/include/uapi/linux/lnet/lnet-types.h b/lnet/include/uapi/linux/lnet/lnet-types.h index e6ee09f..3134f38 100644 --- a/lnet/include/uapi/linux/lnet/lnet-types.h +++ b/lnet/include/uapi/linux/lnet/lnet-types.h @@ -403,9 +403,9 @@ struct lnet_md { * Specify the memory region associated with the memory descriptor. * If the options field has: * - LNET_MD_KIOV bit set: The start field points to the starting - * address of an array of lnet_kiov_t and the length field specifies + * address of an array of struct bio_vec and the length field specifies * the number of entries in the array. The length can't be bigger - * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based + * than LNET_MAX_IOV. The struct bio_vec is used to describe page-based * fragments that are not necessarily mapped in virtal memory. * - LNET_MD_IOVEC bit set: The start field points to the starting * address of an array of struct kvec and the length field specifies @@ -465,7 +465,7 @@ struct lnet_md { * acknowledgment. Acknowledgments are never sent for GET operations. * The data sent in the REPLY serves as an implicit acknowledgment. * - LNET_MD_KIOV: The start and length fields specify an array of - * lnet_kiov_t. + * struct bio_vec. * - LNET_MD_IOVEC: The start and length fields specify an array of * struct iovec. * - LNET_MD_MAX_SIZE: The max_size field is valid. @@ -537,21 +537,6 @@ struct lnet_md { /** Infinite threshold on MD operations. See struct lnet_md::threshold */ #define LNET_MD_THRESH_INF (-1) -/** - * A page-based fragment of a MD. - */ -typedef struct { - /** Pointer to the page where the fragment resides */ - struct page *kiov_page; - /** Length in bytes of the fragment */ - unsigned int kiov_len; - /** - * Starting offset of the fragment within the page. Note that the - * end of the fragment must not pass the end of the page; i.e., - * kiov_len + kiov_offset <= PAGE_SIZE. 
- */ - unsigned int kiov_offset; -} lnet_kiov_t; /** @} lnet_md */ /** \addtogroup lnet_eq diff --git a/lnet/klnds/gnilnd/gnilnd.h b/lnet/klnds/gnilnd/gnilnd.h index 0389887..b9333de 100644 --- a/lnet/klnds/gnilnd/gnilnd.h +++ b/lnet/klnds/gnilnd/gnilnd.h @@ -1787,10 +1787,11 @@ int kgnilnd_eager_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, void **new_private); int kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int delayed, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen); -__u16 kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int offset, unsigned int nob, int dump_blob); +__u16 kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov, + unsigned int offset, unsigned int nob, int dump_blob); /* purgatory functions */ void kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer); diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c index d6358ff..ea712dc 100644 --- a/lnet/klnds/gnilnd/gnilnd_cb.c +++ b/lnet/klnds/gnilnd/gnilnd_cb.c @@ -321,8 +321,8 @@ kgnilnd_cksum(void *ptr, size_t nob) } __u16 -kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int nob, int dump_blob) +kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov, + unsigned int offset, unsigned int nob, int dump_blob) { __wsum cksum = 0; __wsum tmpck; @@ -339,15 +339,15 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, /* if loops changes, please change kgnilnd_setup_phys_buffer */ - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; nkiov--; kiov++; LASSERT(nkiov > 0); } - /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */ - odd = (unsigned long) (kiov[0].kiov_len - offset) & 1; + /* ignore nob here, if nob < (bv_len - offset), kiov == 1 */ + odd = (unsigned long) (kiov[0].bv_len - offset) & 1; if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) { struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()]; @@ -356,10 +356,10 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, get_cpu(), kgnilnd_data.kgn_cksum_map_pages); CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n", - odd, kiov[0].kiov_len, offset, nob); + odd, kiov[0].bv_len, offset, nob); for (i = 0; i < nkiov; i++) { - pages[i] = kiov[i].kiov_page; + pages[i] = kiov[i].bv_page; } addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL); @@ -372,42 +372,46 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, } atomic_inc(&kgnilnd_data.kgn_nvmap_cksum); - tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob); + tmpck = _kgnilnd_cksum(0, ((void *) addr + kiov[0].bv_offset + + offset), nob); cksum = tmpck; if (dump_blob) { kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload", - (void *)addr + kiov[0].kiov_offset + offset, nob); + (void *)addr + kiov[0].bv_offset + + offset, nob); } CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n", - cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset); + cksum, tmpck, addr, kiov[0].bv_offset, nob, offset); vunmap(addr); } else { do { - fraglen = min(kiov->kiov_len - offset, nob); + fraglen = min(kiov->bv_len - offset, nob); /* make dang sure we don't send a bogus checksum if somehow we get * an odd length fragment on anything but the last entry in a kiov - * we know from kgnilnd_setup_rdma_buffer that we can't have non * 
PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */ LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE), - "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n", - fraglen, nkiov, nob, kiov->kiov_len, offset, kiov); + "odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u kiov 0x%p\n", + fraglen, nkiov, nob, kiov->bv_len, + offset, kiov); - addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset; + addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset + + offset; tmpck = _kgnilnd_cksum(cksum, addr, fraglen); CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n", - cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr, - fraglen, offset); + cksum, tmpck, kiov->bv_page, kiov->bv_offset, + addr, fraglen, offset); cksum = tmpck; if (dump_blob) kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen); - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); kiov++; nkiov--; @@ -509,7 +513,7 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni int kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int nob) { kgn_msg_t *msg = &tx->tx_msg; @@ -525,41 +529,45 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, } else if (kiov != NULL) { if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) { - niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) / - PAGE_SIZE); + niov = round_up(nob + offset + kiov->bv_offset, + PAGE_SIZE); } LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE, "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n", niov, msg, kiov, iov, offset, nob); - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; niov--; kiov++; LASSERT(niov > 0); } for (i = 0; i < niov; i++) { - /* We can't have a kiov_offset on anything but the first entry, - * otherwise we'll have a hole at the end of the mapping as we only map - * whole pages. - * Also, if we have a kiov_len < PAGE_SIZE but we need to map more - * than kiov_len, we will also have a whole at the end of that page - * which isn't allowed */ - if ((kiov[i].kiov_offset != 0 && i > 0) || - (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) { - CNETERR("Can't make payload contiguous in I/O VM: page %d, offset %u, nob %u, kiov_offset %u, kiov_len %u\n", - i, offset, nob, kiov->kiov_offset, kiov->kiov_len); + /* We can't have a bv_offset on anything but the first + * entry, otherwise we'll have a hole at the end of the + * mapping as we only map whole pages. 
+ * Also, if we have a bv_len < PAGE_SIZE but we need to + * map more than bv_len, we will also have a whole at + * the end of that page which isn't allowed + */ + if ((kiov[i].bv_offset != 0 && i > 0) || + (kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE && + i < niov - 1)) { + CNETERR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n", + i, offset, nob, kiov->bv_offset, + kiov->bv_len); RETURN(-EINVAL); } - tx->tx_imm_pages[i] = kiov[i].kiov_page; + tx->tx_imm_pages[i] = kiov[i].bv_page; } /* hijack tx_phys for the later unmap */ if (niov == 1) { /* tx->phyx being equal to NULL is the signal for unmap to discern between kmap and vmap */ tx->tx_phys = NULL; - tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset; + tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + + kiov[0].bv_offset + offset; atomic_inc(&kgnilnd_data.kgn_nkmap_short); GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p", nob, kiov, tx->tx_buffer); @@ -571,10 +579,14 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, } atomic_inc(&kgnilnd_data.kgn_nvmap_short); - /* make sure we take into account the kiov offset as the start of the buffer */ - tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset; - GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p", - niov, nob, kiov, tx->tx_phys, tx->tx_buffer); + /* make sure we take into account the kiov offset as the + * start of the buffer + */ + tx->tx_buffer = (void *)tx->tx_phys + kiov[0].bv_offset + + offset; + GNIDBG_TX(D_NET, tx, + "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p", + niov, nob, kiov, tx->tx_phys, tx->tx_buffer); } tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV; tx->tx_nob = nob; @@ -650,7 +662,7 @@ kgnilnd_setup_virt_buffer(kgn_tx_t *tx, } int -kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, +kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov, unsigned int offset, unsigned int nob) { gni_mem_segment_t *phys; @@ -678,8 +690,8 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, /* if loops changes, please change kgnilnd_cksum_kiov * and kgnilnd_setup_immediate_buffer */ - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; nkiov--; kiov++; LASSERT(nkiov > 0); @@ -691,30 +703,31 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED; tx->tx_nob = nob; - /* kiov_offset is start of 'valid' buffer, so index offset past that */ - tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset)); + /* bv_offset is start of 'valid' buffer, so index offset past that */ + tx->tx_buffer = (void *)((unsigned long)(kiov->bv_offset + offset)); phys = tx->tx_phys; CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n", - tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset); + tx, tx->tx_buffer, kiov, kiov->bv_offset, nkiov, offset); do { - fraglen = min(kiov->kiov_len - offset, nob); - - /* We can't have a kiov_offset on anything but the first entry, - * otherwise we'll have a hole at the end of the mapping as we only map - * whole pages. Only the first page is allowed to have an offset - - * we'll add that into tx->tx_buffer and that will get used when we - * map in the segments (see kgnilnd_map_buffer). 
- * Also, if we have a kiov_len < PAGE_SIZE but we need to map more - * than kiov_len, we will also have a whole at the end of that page - * which isn't allowed */ + fraglen = min(kiov->bv_len - offset, nob); + + /* We can't have a bv_offset on anything but the first entry, + * otherwise we'll have a hole at the end of the mapping as we + * only map whole pages. Only the first page is allowed to + * have an offset - we'll add that into tx->tx_buffer and that + * will get used when we map in the segments (see + * kgnilnd_map_buffer). Also, if we have a bv_len < PAGE_SIZE + * but we need to map more than bv_len, we will also have a + * whole at the end of that page which isn't allowed + */ if ((phys != tx->tx_phys) && - ((kiov->kiov_offset != 0) || - ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) { - CERROR("Can't make payload contiguous in I/O VM: page %d, offset %u, nob %u, kiov_offset %u, kiov_len %u\n", + ((kiov->bv_offset != 0) || + ((kiov->bv_len < PAGE_SIZE) && (nob > kiov->bv_len)))) { + CERROR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n", (int)(phys - tx->tx_phys), - offset, nob, kiov->kiov_offset, kiov->kiov_len); + offset, nob, kiov->bv_offset, kiov->bv_len); rc = -EINVAL; GOTO(error, rc); } @@ -730,11 +743,12 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, GOTO(error, rc); } - CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u " - "nkiov %u offset %u\n", - kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset); + CDEBUG(D_BUFFS, + "page 0x%p bv_offset %u bv_len %u nob %u nkiov %u offset %u\n", + kiov->bv_page, kiov->bv_offset, kiov->bv_len, nob, nkiov, + offset); - phys->address = page_to_phys(kiov->kiov_page); + phys->address = page_to_phys(kiov->bv_page); phys++; kiov++; nkiov--; @@ -762,7 +776,7 @@ error: static inline int kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int nob) { int rc; @@ -792,7 +806,7 @@ kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov, static void kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov, unsigned int *offset, unsigned int *nob, - lnet_kiov_t **kiov, int put_len) + struct bio_vec **kiov, int put_len) { /* GETs are weird, see kgnilnd_send */ if (lntmsg->msg_type == LNET_MSG_GET) { @@ -815,10 +829,10 @@ kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov, static inline void kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len) { - unsigned int niov, offset, nob; - lnet_kiov_t *kiov; - struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; - int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1); + unsigned int niov, offset, nob; + struct bio_vec *kiov; + struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; + int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1); GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) || (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) || @@ -868,8 +882,8 @@ kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len) int rc = 0; __u16 cksum; unsigned int niov, offset, nob; - lnet_kiov_t *kiov; - struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; + struct bio_vec *kiov; + struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump; /* we can only match certain requests */ @@ -2105,7 +2119,7 @@ kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) int routing = lntmsg->msg_routing; unsigned 
int niov = lntmsg->msg_niov; struct kvec *iov = lntmsg->msg_iov; - lnet_kiov_t *kiov = lntmsg->msg_kiov; + struct bio_vec *kiov = lntmsg->msg_kiov; unsigned int offset = lntmsg->msg_offset; unsigned int nob = lntmsg->msg_len; unsigned int msg_vmflush = lntmsg->msg_vmflush; @@ -2274,7 +2288,7 @@ kgnilnd_setup_rdma(struct lnet_ni *ni, kgn_rx_t *rx, struct lnet_msg *lntmsg, in kgn_msg_t *rxmsg = rx->grx_msg; unsigned int niov = lntmsg->msg_niov; struct kvec *iov = lntmsg->msg_iov; - lnet_kiov_t *kiov = lntmsg->msg_kiov; + struct bio_vec *kiov = lntmsg->msg_kiov; unsigned int offset = lntmsg->msg_offset; unsigned int nob = lntmsg->msg_len; int done_type; @@ -2422,7 +2436,7 @@ kgnilnd_eager_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int delayed, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { kgn_rx_t *rx = private; @@ -3120,7 +3134,7 @@ kgnilnd_reaper(void *arg) int kgnilnd_recv_bte_get(kgn_tx_t *tx) { unsigned niov, offset, nob; - lnet_kiov_t *kiov; + struct bio_vec *kiov; struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma); diff --git a/lnet/klnds/gnilnd/gnilnd_proc.c b/lnet/klnds/gnilnd/gnilnd_proc.c index 3cab86a..b9210b8 100644 --- a/lnet/klnds/gnilnd/gnilnd_proc.c +++ b/lnet/klnds/gnilnd/gnilnd_proc.c @@ -40,7 +40,7 @@ static int _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob) { - lnet_kiov_t *src, *dest; + struct bio_vec *src, *dest; struct timespec begin, end, diff; int niov; int rc = 0; @@ -57,20 +57,20 @@ _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob) } for (i = 0; i < LNET_MAX_IOV; i++) { - src[i].kiov_offset = 0; - src[i].kiov_len = PAGE_SIZE; - src[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + src[i].bv_offset = 0; + src[i].bv_len = PAGE_SIZE; + src[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (src[i].kiov_page == NULL) { + if (src[i].bv_page == NULL) { CERROR("couldn't allocate page %d\n", i); GOTO(unwind, rc = -ENOMEM); } - dest[i].kiov_offset = 0; - dest[i].kiov_len = PAGE_SIZE; - dest[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + dest[i].bv_offset = 0; + dest[i].bv_len = PAGE_SIZE; + dest[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (dest[i].kiov_page == NULL) { + if (dest[i].bv_page == NULL) { CERROR("couldn't allocate page %d\n", i); GOTO(unwind, rc = -ENOMEM); } @@ -85,31 +85,31 @@ _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob) } /* setup real data */ - src[0].kiov_offset = 317; - dest[0].kiov_offset = 592; + src[0].bv_offset = 317; + dest[0].bv_offset = 592; switch (caseno) { default: /* odd -> even */ break; case 1: /* odd -> odd */ - dest[0].kiov_offset -= 1; + dest[0].bv_offset -= 1; break; case 2: /* even -> even */ - src[0].kiov_offset += 1; + src[0].bv_offset += 1; break; case 3: /* even -> odd */ - src[0].kiov_offset += 1; - dest[0].kiov_offset -= 1; + src[0].bv_offset += 1; + dest[0].bv_offset -= 1; } - src[0].kiov_len = PAGE_SIZE - src[0].kiov_offset; - dest[0].kiov_len = PAGE_SIZE - dest[0].kiov_offset; + src[0].bv_len = PAGE_SIZE - src[0].bv_offset; + dest[0].bv_len = PAGE_SIZE - dest[0].bv_offset; for (i = 0; i < niov; i++) { - memset(page_address(src[i].kiov_page) + src[i].kiov_offset, - 0xf0 + i, src[i].kiov_len); + memset(page_address(src[i].bv_page) + src[i].bv_offset, + 0xf0 + i, src[i].bv_len); } 
lnet_copy_kiov2kiov(niov, dest, 0, niov, src, 0, nob); @@ -117,8 +117,10 @@ _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob) getnstimeofday(&begin); for (n = 0; n < nloops; n++) { - CDEBUG(D_BUFFS, "case %d loop %d src %d dest %d nob %d niov %d\n", - caseno, n, src[0].kiov_offset, dest[0].kiov_offset, nob, niov); + CDEBUG(D_BUFFS, + "case %d loop %d src %d dest %d nob %d niov %d\n", + caseno, n, src[0].bv_offset, dest[0].bv_offset, nob, + niov); cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - (n % nob), 1); cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - (n % nob), 1); @@ -141,12 +143,11 @@ _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob) unwind: CDEBUG(D_NET, "freeing %d pages\n", i); for (i -= 1; i >= 0; i--) { - if (src[i].kiov_page != NULL) { - __free_page(src[i].kiov_page); - } - if (dest[i].kiov_page != NULL) { - __free_page(dest[i].kiov_page); - } + if (src[i].bv_page) + __free_page(src[i].bv_page); + + if (dest[i].bv_page) + __free_page(dest[i].bv_page); } if (src != NULL) diff --git a/lnet/klnds/o2iblnd/o2iblnd.h b/lnet/klnds/o2iblnd/o2iblnd.h index 12c770d..117a436 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.h +++ b/lnet/klnds/o2iblnd/o2iblnd.h @@ -1218,6 +1218,6 @@ int kiblnd_post_rx(struct kib_rx *rx, int credit); int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg); int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int delayed, unsigned int niov, struct kvec *iov, - lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, + struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen); diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c index b7b1cc7..e79f6ce 100644 --- a/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -806,7 +806,7 @@ static int kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx, static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, struct kib_rdma_desc *rd, int nkiov, - lnet_kiov_t *kiov, int offset, int nob) + struct bio_vec *kiov, int offset, int nob) { struct kib_net *net = ni->ni_data; struct scatterlist *sg; @@ -819,8 +819,8 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, LASSERT(nkiov > 0); LASSERT(net != NULL); - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; nkiov--; kiov++; LASSERT(nkiov > 0); @@ -832,24 +832,24 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, do { LASSERT(nkiov > 0); - fragnob = min((int)(kiov->kiov_len - offset), nob); + fragnob = min((int)(kiov->bv_len - offset), nob); /* * We're allowed to start at a non-aligned page offset in * the first fragment and end at a non-aligned page offset * in the last fragment. 
*/ - if ((fragnob < (int)(kiov->kiov_len - offset)) && + if ((fragnob < (int)(kiov->bv_len - offset)) && nkiov < max_nkiov && nob > fragnob) { CDEBUG(D_NET, "fragnob %d < available page %d: with" " remaining %d kiovs with %d nob left\n", - fragnob, (int)(kiov->kiov_len - offset), + fragnob, (int)(kiov->bv_len - offset), nkiov, nob); tx->tx_gaps = true; } - sg_set_page(sg, kiov->kiov_page, fragnob, - kiov->kiov_offset + offset); + sg_set_page(sg, kiov->bv_page, fragnob, + kiov->bv_offset + offset); sg = sg_next(sg); if (!sg) { CERROR("lacking enough sg entries to map tx\n"); @@ -1646,7 +1646,7 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) int routing = lntmsg->msg_routing; unsigned int payload_niov = lntmsg->msg_niov; struct kvec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + struct bio_vec *payload_kiov = lntmsg->msg_kiov; unsigned int payload_offset = lntmsg->msg_offset; unsigned int payload_nob = lntmsg->msg_len; struct kib_msg *ibmsg; @@ -1811,13 +1811,13 @@ static void kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg) { struct lnet_process_id target = lntmsg->msg_target; - unsigned int niov = lntmsg->msg_niov; + unsigned int niov = lntmsg->msg_niov; struct kvec *iov = lntmsg->msg_iov; - lnet_kiov_t *kiov = lntmsg->msg_kiov; - unsigned int offset = lntmsg->msg_offset; - unsigned int nob = lntmsg->msg_len; + struct bio_vec *kiov = lntmsg->msg_kiov; + unsigned int offset = lntmsg->msg_offset; + unsigned int nob = lntmsg->msg_len; struct kib_tx *tx; - int rc; + int rc; tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); if (tx == NULL) { @@ -1873,7 +1873,7 @@ failed_0: int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, - int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + int delayed, unsigned int niov, struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { struct kib_rx *rx = private; diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h index 3c3c20a..2b37222 100644 --- a/lnet/klnds/socklnd/socklnd.h +++ b/lnet/klnds/socklnd/socklnd.h @@ -236,11 +236,11 @@ struct ksock_nal_data { /* A packet just assembled for transmission is represented by 1 or more * struct kvec fragments (the first frag contains the portals header), - * followed by 0 or more lnet_kiov_t fragments. + * followed by 0 or more struct bio_vec fragments. * * On the receive side, initially 1 struct kvec fragment is posted for * receive (the header). Once the header has been received, the payload is - * received into either struct kvec or lnet_kiov_t fragments, depending on + * received into either struct kvec or struct bio_vec fragments, depending on * what the header matched or whether the message needs forwarding. */ struct ksock_conn; /* forward ref */ @@ -260,7 +260,7 @@ struct ksock_tx { /* transmit packet */ unsigned short tx_zc_capable:1; /* payload is large enough for ZC */ unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? 
*/ unsigned short tx_nonblk:1; /* it's a non-blocking ACK */ - lnet_kiov_t *tx_kiov; /* packet page frags */ + struct bio_vec *tx_kiov; /* packet page frags */ struct ksock_conn *tx_conn; /* owning conn */ struct lnet_msg *tx_lnetmsg; /* lnet message for lnet_finalize() */ time64_t tx_deadline; /* when (in secs) tx times out */ @@ -270,7 +270,7 @@ struct ksock_tx { /* transmit packet */ union { struct { struct kvec iov; /* virt hdr */ - lnet_kiov_t kiov[0]; /* paged payload */ + struct bio_vec kiov[0]; /* paged payload */ } paged; struct { struct kvec iov[1]; /* virt hdr + payload */ @@ -286,7 +286,7 @@ struct ksock_tx { /* transmit packet */ * header, or up to LNET_MAX_IOV frags of payload of either type. */ union ksock_rxiovspace { struct kvec iov[LNET_MAX_IOV]; - lnet_kiov_t kiov[LNET_MAX_IOV]; + struct bio_vec kiov[LNET_MAX_IOV]; }; #define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */ @@ -329,10 +329,11 @@ struct ksock_conn { int ksnc_rx_nob_wanted; /* bytes actually wanted */ int ksnc_rx_niov; /* # kvec frags */ struct kvec *ksnc_rx_iov; /* the kvec frags */ - int ksnc_rx_nkiov; /* # page frags */ - lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ + int ksnc_rx_nkiov; /* # page frags */ + struct bio_vec *ksnc_rx_kiov; /* the page frags */ union ksock_rxiovspace ksnc_rx_iov_space;/* space for frag descriptors */ - __u32 ksnc_rx_csum; /* partial checksum for incoming data */ + __u32 ksnc_rx_csum; /* partial checksum for incoming + * data */ struct lnet_msg *ksnc_lnet_msg; /* rx lnet_finalize arg*/ struct ksock_msg ksnc_msg; /* incoming message buffer: * V2.x message takes the @@ -577,7 +578,7 @@ int ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg); int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg); int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int delayed, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen); int ksocknal_accept(struct lnet_ni *ni, struct socket *sock); diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index 39cd22b..e89b911 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -152,7 +152,7 @@ static int ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, struct kvec *scratch_iov) { - lnet_kiov_t *kiov = tx->tx_kiov; + struct bio_vec *kiov = tx->tx_kiov; int nob; int rc; @@ -173,13 +173,13 @@ ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, do { LASSERT(tx->tx_nkiov > 0); - if (nob < (int)kiov->kiov_len) { - kiov->kiov_offset += nob; - kiov->kiov_len -= nob; + if (nob < (int)kiov->bv_len) { + kiov->bv_offset += nob; + kiov->bv_len -= nob; return rc; } - nob -= (int)kiov->kiov_len; + nob -= (int)kiov->bv_len; tx->tx_kiov = ++kiov; tx->tx_nkiov--; } while (nob != 0); @@ -302,7 +302,7 @@ static int ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs, struct kvec *scratch_iov) { - lnet_kiov_t *kiov = conn->ksnc_rx_kiov; + struct bio_vec *kiov = conn->ksnc_rx_kiov; int nob; int rc; LASSERT(conn->ksnc_rx_nkiov > 0); @@ -329,13 +329,13 @@ ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs, do { LASSERT(conn->ksnc_rx_nkiov > 0); - if (nob < (int) kiov->kiov_len) { - kiov->kiov_offset += nob; - kiov->kiov_len -= nob; + if (nob < (int) kiov->bv_len) { + kiov->bv_offset += nob; + kiov->bv_len -= nob; return -EAGAIN; } - nob -= kiov->kiov_len; + nob -= 
kiov->bv_len; conn->ksnc_rx_kiov = ++kiov; conn->ksnc_rx_nkiov--; } while (nob != 0); @@ -985,14 +985,14 @@ ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) int mpflag = 1; int type = lntmsg->msg_type; struct lnet_process_id target = lntmsg->msg_target; - unsigned int payload_niov = lntmsg->msg_niov; + unsigned int payload_niov = lntmsg->msg_niov; struct kvec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - struct ksock_tx *tx; - int desc_size; - int rc; + struct bio_vec *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + struct ksock_tx *tx; + int desc_size; + int rc; /* NB 'private' is different depending on what we're sending. * Just ignore it... */ @@ -1385,7 +1385,7 @@ ksocknal_process_receive(struct ksock_conn *conn, int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, int delayed, unsigned int niov, struct kvec *iov, - lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, + struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { struct ksock_conn *conn = private; diff --git a/lnet/klnds/socklnd/socklnd_lib.c b/lnet/klnds/socklnd/socklnd_lib.c index 3ac72c9..7f9ffe0 100644 --- a/lnet/klnds/socklnd/socklnd_lib.c +++ b/lnet/klnds/socklnd/socklnd_lib.c @@ -117,7 +117,7 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, struct kvec *scratchiov) { struct socket *sock = conn->ksnc_sock; - lnet_kiov_t *kiov = tx->tx_kiov; + struct bio_vec *kiov = tx->tx_kiov; int rc; int nob; @@ -129,13 +129,13 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, if (tx->tx_msg.ksm_zc_cookies[0] != 0) { /* Zero copy is enabled */ struct sock *sk = sock->sk; - struct page *page = kiov->kiov_page; - int offset = kiov->kiov_offset; - int fragsize = kiov->kiov_len; + struct page *page = kiov->bv_page; + int offset = kiov->bv_offset; + int fragsize = kiov->bv_len; int msgflg = MSG_DONTWAIT; CDEBUG(D_NET, "page %p + offset %x for %d\n", - page, offset, kiov->kiov_len); + page, offset, kiov->bv_len); if (!list_empty(&conn->ksnc_tx_queue) || fragsize < tx->tx_resid) @@ -162,9 +162,9 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, int i; for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + - kiov[i].kiov_offset; - nob += scratchiov[i].iov_len = kiov[i].kiov_len; + scratchiov[i].iov_base = kmap(kiov[i].bv_page) + + kiov[i].bv_offset; + nob += scratchiov[i].iov_len = kiov[i].bv_len; } if (!list_empty(&conn->ksnc_tx_queue) || @@ -174,7 +174,7 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob); for (i = 0; i < niov; i++) - kunmap(kiov[i].kiov_page); + kunmap(kiov[i].bv_page); } return rc; } @@ -262,7 +262,7 @@ ksocknal_lib_kiov_vunmap(void *addr) } static void * -ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, +ksocknal_lib_kiov_vmap(struct bio_vec *kiov, int niov, struct kvec *iov, struct page **pages) { void *addr; @@ -278,24 +278,24 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags) return NULL; - for (nob = i = 0; i < niov; i++) { - if ((kiov[i].kiov_offset != 0 && i > 0) || - (kiov[i].kiov_offset + kiov[i].kiov_len != + for (nob = i = 0; i < niov; i++) { + if ((kiov[i].bv_offset != 0 && i > 0) || + 
(kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE && i < niov - 1)) - return NULL; + return NULL; - pages[i] = kiov[i].kiov_page; - nob += kiov[i].kiov_len; - } + pages[i] = kiov[i].bv_page; + nob += kiov[i].bv_len; + } - addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL); - if (addr == NULL) - return NULL; + addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL); + if (addr == NULL) + return NULL; - iov->iov_base = addr + kiov[0].kiov_offset; - iov->iov_len = nob; + iov->iov_base = addr + kiov[0].bv_offset; + iov->iov_len = nob; - return addr; + return addr; } int @@ -313,7 +313,7 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn, struct page **pages, #endif unsigned int niov = conn->ksnc_rx_nkiov; #endif - lnet_kiov_t *kiov = conn->ksnc_rx_kiov; + struct bio_vec *kiov = conn->ksnc_rx_kiov; struct msghdr msg = { .msg_flags = 0 }; @@ -334,9 +334,9 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn, struct page **pages, } else { for (nob = i = 0; i < niov; i++) { - nob += scratchiov[i].iov_len = kiov[i].kiov_len; - scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + - kiov[i].kiov_offset; + nob += scratchiov[i].iov_len = kiov[i].bv_len; + scratchiov[i].iov_base = kmap(kiov[i].bv_page) + + kiov[i].bv_offset; } n = niov; } @@ -346,34 +346,35 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn, struct page **pages, rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, n, nob, MSG_DONTWAIT); - if (conn->ksnc_msg.ksm_csum != 0) { - for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { - LASSERT (i < niov); + if (conn->ksnc_msg.ksm_csum != 0) { + for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { + LASSERT(i < niov); - /* Dang! have to kmap again because I have nowhere to stash the - * mapped address. But by doing it while the page is still - * mapped, the kernel just bumps the map count and returns me - * the address it stashed. */ - base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; - fragnob = kiov[i].kiov_len; - if (fragnob > sum) - fragnob = sum; + /* Dang! have to kmap again because I have nowhere to + * stash the mapped address. But by doing it while the + * page is still mapped, the kernel just bumps the map + * count and returns me the address it stashed. 
+ */ + base = kmap(kiov[i].bv_page) + kiov[i].bv_offset; + fragnob = kiov[i].bv_len; + if (fragnob > sum) + fragnob = sum; - conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, - base, fragnob); + conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, + base, fragnob); - kunmap(kiov[i].kiov_page); - } - } + kunmap(kiov[i].bv_page); + } + } - if (addr != NULL) { - ksocknal_lib_kiov_vunmap(addr); - } else { - for (i = 0; i < niov; i++) - kunmap(kiov[i].kiov_page); - } + if (addr != NULL) { + ksocknal_lib_kiov_vunmap(addr); + } else { + for (i = 0; i < niov; i++) + kunmap(kiov[i].bv_page); + } - return (rc); + return rc; } void @@ -394,12 +395,12 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx) if (tx->tx_kiov != NULL) { for (i = 0; i < tx->tx_nkiov; i++) { - base = kmap(tx->tx_kiov[i].kiov_page) + - tx->tx_kiov[i].kiov_offset; + base = kmap(tx->tx_kiov[i].bv_page) + + tx->tx_kiov[i].bv_offset; - csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len); + csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len); - kunmap(tx->tx_kiov[i].kiov_page); + kunmap(tx->tx_kiov[i].bv_page); } } else { for (i = 1; i < tx->tx_niov; i++) diff --git a/lnet/lnet/lib-md.c b/lnet/lnet/lib-md.c index 53a8557..b7b0939 100644 --- a/lnet/lnet/lib-md.c +++ b/lnet/lnet/lib-md.c @@ -132,7 +132,7 @@ lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset) /* * There are three cases to handle: - * 1. The MD is using lnet_kiov_t + * 1. The MD is using struct bio_vec * 2. The MD is using struct kvec * 3. Contiguous buffer allocated via vmalloc * @@ -147,10 +147,10 @@ lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset) * DMAed. */ if ((md->md_options & LNET_MD_KIOV) != 0) { - lnet_kiov_t *kiov = md->md_iov.kiov; + struct bio_vec *kiov = md->md_iov.kiov; - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; niov--; kiov++; if (niov == 0) { @@ -160,7 +160,7 @@ lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset) } cpt = cfs_cpt_of_node(lnet_cpt_table(), - page_to_nid(kiov->kiov_page)); + page_to_nid(kiov->bv_page)); } else { struct kvec *iov = md->md_iov.iov; unsigned long vaddr; @@ -239,11 +239,11 @@ lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink) for (i = 0; i < (int)niov; i++) { /* We take the page pointer on trust */ - if (lmd->md_iov.kiov[i].kiov_offset + - lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE) + if (lmd->md_iov.kiov[i].bv_offset + + lmd->md_iov.kiov[i].bv_len > PAGE_SIZE) return -EINVAL; /* invalid length */ - total_length += lmd->md_iov.kiov[i].kiov_len; + total_length += lmd->md_iov.kiov[i].bv_len; } lmd->md_length = total_length; diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c index 4c6c382..92e19c5 100644 --- a/lnet/lnet/lib-move.c +++ b/lnet/lnet/lib-move.c @@ -387,21 +387,23 @@ EXPORT_SYMBOL(lnet_extract_iov); unsigned int -lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov) +lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov) { unsigned int nob = 0; LASSERT(niov == 0 || kiov != NULL); while (niov-- > 0) - nob += (kiov++)->kiov_len; + nob += (kiov++)->bv_len; return (nob); } EXPORT_SYMBOL(lnet_kiov_nob); void -lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, - unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset, +lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov, + unsigned int doffset, + unsigned int nsiov, struct bio_vec *siov, + unsigned int soffset, unsigned int nob) { /* NB diov, siov are READ-ONLY */ @@ -415,16 +417,16 @@ 
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, LASSERT (!in_interrupt ()); LASSERT (ndiov > 0); - while (doffset >= diov->kiov_len) { - doffset -= diov->kiov_len; + while (doffset >= diov->bv_len) { + doffset -= diov->bv_len; diov++; ndiov--; LASSERT(ndiov > 0); } LASSERT(nsiov > 0); - while (soffset >= siov->kiov_len) { - soffset -= siov->kiov_len; + while (soffset >= siov->bv_len) { + soffset -= siov->bv_len; siov++; nsiov--; LASSERT(nsiov > 0); @@ -433,16 +435,16 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, do { LASSERT(ndiov > 0); LASSERT(nsiov > 0); - this_nob = min3(diov->kiov_len - doffset, - siov->kiov_len - soffset, + this_nob = min3(diov->bv_len - doffset, + siov->bv_len - soffset, nob); if (daddr == NULL) - daddr = ((char *)kmap(diov->kiov_page)) + - diov->kiov_offset + doffset; + daddr = ((char *)kmap(diov->bv_page)) + + diov->bv_offset + doffset; if (saddr == NULL) - saddr = ((char *)kmap(siov->kiov_page)) + - siov->kiov_offset + soffset; + saddr = ((char *)kmap(siov->bv_page)) + + siov->bv_offset + soffset; /* Vanishing risk of kmap deadlock when mapping 2 pages. * However in practice at least one of the kiovs will be mapped @@ -451,22 +453,22 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, memcpy (daddr, saddr, this_nob); nob -= this_nob; - if (diov->kiov_len > doffset + this_nob) { + if (diov->bv_len > doffset + this_nob) { daddr += this_nob; doffset += this_nob; } else { - kunmap(diov->kiov_page); + kunmap(diov->bv_page); daddr = NULL; diov++; ndiov--; doffset = 0; } - if (siov->kiov_len > soffset + this_nob) { + if (siov->bv_len > soffset + this_nob) { saddr += this_nob; soffset += this_nob; } else { - kunmap(siov->kiov_page); + kunmap(siov->bv_page); saddr = NULL; siov++; nsiov--; @@ -475,15 +477,16 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, } while (nob > 0); if (daddr != NULL) - kunmap(diov->kiov_page); + kunmap(diov->bv_page); if (saddr != NULL) - kunmap(siov->kiov_page); + kunmap(siov->bv_page); } EXPORT_SYMBOL(lnet_copy_kiov2kiov); void lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, - unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset, + unsigned int nkiov, struct bio_vec *kiov, + unsigned int kiovoffset, unsigned int nob) { /* NB iov, kiov are READ-ONLY */ @@ -504,8 +507,8 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, } LASSERT(nkiov > 0); - while (kiovoffset >= kiov->kiov_len) { - kiovoffset -= kiov->kiov_len; + while (kiovoffset >= kiov->bv_len) { + kiovoffset -= kiov->bv_len; kiov++; nkiov--; LASSERT(nkiov > 0); @@ -515,12 +518,12 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, LASSERT(niov > 0); LASSERT(nkiov > 0); this_nob = min3((unsigned int)iov->iov_len - iovoffset, - (unsigned int)kiov->kiov_len - kiovoffset, + (unsigned int)kiov->bv_len - kiovoffset, nob); if (addr == NULL) - addr = ((char *)kmap(kiov->kiov_page)) + - kiov->kiov_offset + kiovoffset; + addr = ((char *)kmap(kiov->bv_page)) + + kiov->bv_offset + kiovoffset; memcpy((char *)iov->iov_base + iovoffset, addr, this_nob); nob -= this_nob; @@ -533,11 +536,11 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, iovoffset = 0; } - if (kiov->kiov_len > kiovoffset + this_nob) { + if (kiov->bv_len > kiovoffset + this_nob) { addr += this_nob; kiovoffset += this_nob; } else { - kunmap(kiov->kiov_page); + 
kunmap(kiov->bv_page); addr = NULL; kiov++; nkiov--; @@ -547,12 +550,13 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, } while (nob > 0); if (addr != NULL) - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); } EXPORT_SYMBOL(lnet_copy_kiov2iov); void -lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset, +lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov, + unsigned int kiovoffset, unsigned int niov, struct kvec *iov, unsigned int iovoffset, unsigned int nob) { @@ -566,8 +570,8 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffse LASSERT (!in_interrupt ()); LASSERT (nkiov > 0); - while (kiovoffset >= kiov->kiov_len) { - kiovoffset -= kiov->kiov_len; + while (kiovoffset >= kiov->bv_len) { + kiovoffset -= kiov->bv_len; kiov++; nkiov--; LASSERT(nkiov > 0); @@ -584,22 +588,22 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffse do { LASSERT(nkiov > 0); LASSERT(niov > 0); - this_nob = min3((unsigned int)kiov->kiov_len - kiovoffset, + this_nob = min3((unsigned int)kiov->bv_len - kiovoffset, (unsigned int)iov->iov_len - iovoffset, nob); if (addr == NULL) - addr = ((char *)kmap(kiov->kiov_page)) + - kiov->kiov_offset + kiovoffset; + addr = ((char *)kmap(kiov->bv_page)) + + kiov->bv_offset + kiovoffset; memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob); nob -= this_nob; - if (kiov->kiov_len > kiovoffset + this_nob) { + if (kiov->bv_len > kiovoffset + this_nob) { addr += this_nob; kiovoffset += this_nob; } else { - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); addr = NULL; kiov++; nkiov--; @@ -616,13 +620,13 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffse } while (nob > 0); if (addr != NULL) - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); } EXPORT_SYMBOL(lnet_copy_iov2kiov); int -lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, - int src_niov, lnet_kiov_t *src, +lnet_extract_kiov(int dst_niov, struct bio_vec *dst, + int src_niov, struct bio_vec *src, unsigned int offset, unsigned int len) { /* Initialise 'dst' to the subset of 'src' starting at 'offset', @@ -635,8 +639,8 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, return (0); /* no frags */ LASSERT(src_niov > 0); - while (offset >= src->kiov_len) { /* skip initial frags */ - offset -= src->kiov_len; + while (offset >= src->bv_len) { /* skip initial frags */ + offset -= src->bv_len; src_niov--; src++; LASSERT(src_niov > 0); @@ -647,18 +651,18 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, LASSERT(src_niov > 0); LASSERT((int)niov <= dst_niov); - frag_len = src->kiov_len - offset; - dst->kiov_page = src->kiov_page; - dst->kiov_offset = src->kiov_offset + offset; + frag_len = src->bv_len - offset; + dst->bv_page = src->bv_page; + dst->bv_offset = src->bv_offset + offset; if (len <= frag_len) { - dst->kiov_len = len; - LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); + dst->bv_len = len; + LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE); return niov; } - dst->kiov_len = frag_len; - LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); + dst->bv_len = frag_len; + LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE); len -= frag_len; dst++; @@ -675,10 +679,10 @@ lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, int delayed, unsigned int offset, unsigned int mlen, unsigned int rlen) { - unsigned int niov = 0; + unsigned int niov = 0; struct kvec *iov = NULL; - lnet_kiov_t *kiov = NULL; - int rc; + struct bio_vec *kiov = NULL; 
+ int rc; LASSERT (!in_interrupt ()); LASSERT (mlen == 0 || msg != NULL); diff --git a/lnet/lnet/lo.c b/lnet/lnet/lo.c index 6b881fc..6a433f7 100644 --- a/lnet/lnet/lo.c +++ b/lnet/lnet/lo.c @@ -45,7 +45,7 @@ lolnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) static int lolnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int delayed, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct kvec *iov, struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { struct lnet_msg *sendmsg = private; diff --git a/lnet/lnet/router.c b/lnet/lnet/router.c index 9f1d293..a9d6078 100644 --- a/lnet/lnet/router.c +++ b/lnet/lnet/router.c @@ -1234,7 +1234,7 @@ lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages) int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]); while (--npages >= 0) - __free_page(rb->rb_kiov[npages].kiov_page); + __free_page(rb->rb_kiov[npages].bv_page); LIBCFS_FREE(rb, sz); } @@ -1259,15 +1259,15 @@ lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt) GFP_KERNEL | __GFP_ZERO); if (page == NULL) { while (--i >= 0) - __free_page(rb->rb_kiov[i].kiov_page); + __free_page(rb->rb_kiov[i].bv_page); LIBCFS_FREE(rb, sz); return NULL; } - rb->rb_kiov[i].kiov_len = PAGE_SIZE; - rb->rb_kiov[i].kiov_offset = 0; - rb->rb_kiov[i].kiov_page = page; + rb->rb_kiov[i].bv_len = PAGE_SIZE; + rb->rb_kiov[i].bv_offset = 0; + rb->rb_kiov[i].bv_page = page; } return rb; diff --git a/lnet/selftest/brw_test.c b/lnet/selftest/brw_test.c index 55c9486..13f7568 100644 --- a/lnet/selftest/brw_test.c +++ b/lnet/selftest/brw_test.c @@ -237,9 +237,9 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) int off; int len; - pg = bk->bk_iovs[i].kiov_page; - off = bk->bk_iovs[i].kiov_offset; - len = bk->bk_iovs[i].kiov_len; + pg = bk->bk_iovs[i].bv_page; + off = bk->bk_iovs[i].bv_offset; + len = bk->bk_iovs[i].bv_len; brw_fill_page(pg, off, len, pattern, magic); } } @@ -254,9 +254,9 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) int off; int len; - pg = bk->bk_iovs[i].kiov_page; - off = bk->bk_iovs[i].kiov_offset; - len = bk->bk_iovs[i].kiov_len; + pg = bk->bk_iovs[i].bv_page; + off = bk->bk_iovs[i].bv_offset; + len = bk->bk_iovs[i].bv_len; if (brw_check_page(pg, off, len, pattern, magic) != 0) { CERROR("Bulk page %p (%d/%d) is corrupted!\n", pg, i, bk->bk_niov); diff --git a/lnet/selftest/conrpc.c b/lnet/selftest/conrpc.c index 39d19f0..6b3200d 100644 --- a/lnet/selftest/conrpc.c +++ b/lnet/selftest/conrpc.c @@ -149,10 +149,10 @@ lstcon_rpc_put(struct lstcon_rpc *crpc) LASSERT(list_empty(&crpc->crp_link)); for (i = 0; i < bulk->bk_niov; i++) { - if (bulk->bk_iovs[i].kiov_page == NULL) + if (bulk->bk_iovs[i].bv_page == NULL) continue; - __free_page(bulk->bk_iovs[i].kiov_page); + __free_page(bulk->bk_iovs[i].bv_page); } srpc_client_rpc_decref(crpc->crp_rpc); @@ -696,7 +696,7 @@ lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats, } static struct lnet_process_id_packed * -lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov) +lstcon_next_id(int idx, int nkiov, struct bio_vec *kiov) { struct lnet_process_id_packed *pid; int i; @@ -705,14 +705,14 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov) LASSERT (i < nkiov); - pid = (struct lnet_process_id_packed *)page_address(kiov[i].kiov_page); + pid = (struct lnet_process_id_packed *)page_address(kiov[i].bv_page); return &pid[idx % SFW_ID_PER_PAGE]; } static int lstcon_dstnodes_prep(struct lstcon_group *grp, int idx, - int dist, int span, int nkiov, 
lnet_kiov_t *kiov) + int dist, int span, int nkiov, struct bio_vec *kiov) { struct lnet_process_id_packed *pid; struct lstcon_ndlink *ndl; @@ -850,12 +850,12 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats, PAGE_SIZE : min_t(int, nob, PAGE_SIZE); nob -= len; - bulk->bk_iovs[i].kiov_offset = 0; - bulk->bk_iovs[i].kiov_len = len; - bulk->bk_iovs[i].kiov_page = + bulk->bk_iovs[i].bv_offset = 0; + bulk->bk_iovs[i].bv_len = len; + bulk->bk_iovs[i].bv_page = alloc_page(GFP_KERNEL); - if (bulk->bk_iovs[i].kiov_page == NULL) { + if (bulk->bk_iovs[i].bv_page == NULL) { lstcon_rpc_put(*crpc); return -ENOMEM; } diff --git a/lnet/selftest/framework.c b/lnet/selftest/framework.c index 19691f6..cced64d 100644 --- a/lnet/selftest/framework.c +++ b/lnet/selftest/framework.c @@ -790,7 +790,7 @@ sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc) struct lnet_process_id_packed id; int j; - dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); + dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page); LASSERT (dests != NULL); /* my pages are within KVM always */ id = dests[i % SFW_ID_PER_PAGE]; if (msg->msg_magic != SRPC_MSG_MAGIC) diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c index 5e7e539..1bb716e 100644 --- a/lnet/selftest/rpc.c +++ b/lnet/selftest/rpc.c @@ -90,9 +90,9 @@ srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off, LASSERT(off < PAGE_SIZE); LASSERT(nob > 0 && nob <= PAGE_SIZE); - bk->bk_iovs[i].kiov_offset = off; - bk->bk_iovs[i].kiov_page = pg; - bk->bk_iovs[i].kiov_len = nob; + bk->bk_iovs[i].bv_offset = off; + bk->bk_iovs[i].bv_page = pg; + bk->bk_iovs[i].bv_len = nob; return nob; } @@ -105,7 +105,7 @@ srpc_free_bulk(struct srpc_bulk *bk) LASSERT(bk != NULL); for (i = 0; i < bk->bk_niov; i++) { - pg = bk->bk_iovs[i].kiov_page; + pg = bk->bk_iovs[i].bv_page; if (pg == NULL) break; diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h index a1574b2..c14f9e1 100644 --- a/lnet/selftest/selftest.h +++ b/lnet/selftest/selftest.h @@ -151,11 +151,11 @@ struct srpc_event { /* bulk descriptor */ struct srpc_bulk { - int bk_len; /* len of bulk data */ - struct lnet_handle_md bk_mdh; - int bk_sink; /* sink/source */ - int bk_niov; /* # iov in bk_iovs */ - lnet_kiov_t bk_iovs[0]; + int bk_len; /* len of bulk data */ + struct lnet_handle_md bk_mdh; + int bk_sink; /* sink/source */ + int bk_niov; /* # iov in bk_iovs */ + struct bio_vec bk_iovs[0]; }; /* message buffer descriptor */ diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index 2bb3b46..4f49298 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -1448,8 +1448,8 @@ struct ptlrpc_bulk_desc { struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT]; /* encrypted iov, size is either 0 or bd_iov_count. 
*/ - lnet_kiov_t *bd_enc_vec; - lnet_kiov_t *bd_vec; + struct bio_vec *bd_enc_vec; + struct bio_vec *bd_vec; }; enum { diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c index 8f3258f..dacd953 100644 --- a/lustre/osc/osc_page.c +++ b/lustre/osc/osc_page.c @@ -893,7 +893,7 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc, for (i = 0; i < page_count; i++) { void *pz; if (desc) - pz = page_zone(desc->bd_vec[i].kiov_page); + pz = page_zone(desc->bd_vec[i].bv_page); else pz = page_zone(aa->aa_ppga[i]->pg); diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c index 5602b58..d85bee5 100644 --- a/lustre/ptlrpc/client.c +++ b/lustre/ptlrpc/client.c @@ -66,7 +66,7 @@ static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc) int i; for (i = 0; i < desc->bd_iov_count ; i++) - put_page(desc->bd_vec[i].kiov_page); + put_page(desc->bd_vec[i].bv_page); } static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc, @@ -243,7 +243,7 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len, int pin) { - lnet_kiov_t *kiov; + struct bio_vec *kiov; LASSERT(desc->bd_iov_count < desc->bd_max_iov); LASSERT(page != NULL); @@ -258,9 +258,9 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, if (pin) get_page(page); - kiov->kiov_page = page; - kiov->kiov_offset = pageoffset; - kiov->kiov_len = len; + kiov->bv_page = page; + kiov->bv_offset = pageoffset; + kiov->bv_len = len; desc->bd_iov_count++; } diff --git a/lustre/ptlrpc/gss/gss_api.h b/lustre/ptlrpc/gss/gss_api.h index a5f203e..aa48101 100644 --- a/lustre/ptlrpc/gss/gss_api.h +++ b/lustre/ptlrpc/gss/gss_api.h @@ -24,7 +24,7 @@ struct gss_api_mech; typedef int (*digest_hash)( struct ahash_request *req, rawobj_t *hdr, int msgcnt, rawobj_t *msgs, - int iovcnt, lnet_kiov_t *iovs); + int iovcnt, struct bio_vec *iovs); /* The mechanism-independent gss-api context: */ struct gss_ctx { @@ -56,14 +56,14 @@ __u32 lgss_get_mic( int msgcnt, rawobj_t *msgs, int iovcnt, - lnet_kiov_t *iovs, + struct bio_vec *iovs, rawobj_t *mic_token); __u32 lgss_verify_mic( struct gss_ctx *ctx, int msgcnt, rawobj_t *msgs, int iovcnt, - lnet_kiov_t *iovs, + struct bio_vec *iovs, rawobj_t *mic_token); __u32 lgss_wrap( struct gss_ctx *ctx, @@ -131,14 +131,14 @@ struct gss_api_ops { int msgcnt, rawobj_t *msgs, int iovcnt, - lnet_kiov_t *iovs, + struct bio_vec *iovs, rawobj_t *mic_token); __u32 (*gss_verify_mic)( struct gss_ctx *ctx, int msgcnt, rawobj_t *msgs, int iovcnt, - lnet_kiov_t *iovs, + struct bio_vec *iovs, rawobj_t *mic_token); __u32 (*gss_wrap)( struct gss_ctx *ctx, diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c index 59fbd21..3ddc7ee 100644 --- a/lustre/ptlrpc/gss/gss_bulk.c +++ b/lustre/ptlrpc/gss/gss_bulk.c @@ -251,12 +251,12 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx, /* fix the actual data size */ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) { - if (desc->bd_vec[i].kiov_len + nob > + if (desc->bd_vec[i].bv_len + nob > desc->bd_nob_transferred) { - desc->bd_vec[i].kiov_len = + desc->bd_vec[i].bv_len = desc->bd_nob_transferred - nob; } - nob += desc->bd_vec[i].kiov_len; + nob += desc->bd_vec[i].bv_len; } token.data = bsdv->bsd_data; diff --git a/lustre/ptlrpc/gss/gss_crypto.c b/lustre/ptlrpc/gss/gss_crypto.c index a6ad8f6..7d2e09d 100644 --- a/lustre/ptlrpc/gss/gss_crypto.c +++ b/lustre/ptlrpc/gss/gss_crypto.c @@ -272,7 +272,7 @@ out: int gss_digest_hash(struct ahash_request *req, rawobj_t *hdr, int msgcnt, 
diff --git a/lustre/ptlrpc/gss/gss_crypto.c b/lustre/ptlrpc/gss/gss_crypto.c
index a6ad8f6..7d2e09d 100644
--- a/lustre/ptlrpc/gss/gss_crypto.c
+++ b/lustre/ptlrpc/gss/gss_crypto.c
@@ -272,7 +272,7 @@ out:
 int gss_digest_hash(struct ahash_request *req,
		    rawobj_t *hdr, int msgcnt,
		    rawobj_t *msgs,
-		    int iovcnt, lnet_kiov_t *iovs)
+		    int iovcnt, struct bio_vec *iovs)
 {
	struct scatterlist sg[1];
	struct sg_table sgt;
@@ -295,14 +295,14 @@ int gss_digest_hash(struct ahash_request *req,
	}
	for (i = 0; i < iovcnt; i++) {
-		if (iovs[i].kiov_len == 0)
+		if (iovs[i].bv_len == 0)
			continue;
		sg_init_table(sg, 1);
-		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
-			    iovs[i].kiov_offset);
+		sg_set_page(&sg[0], iovs[i].bv_page, iovs[i].bv_len,
+			    iovs[i].bv_offset);
-		ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+		ahash_request_set_crypt(req, sg, NULL, iovs[i].bv_len);
		rc = crypto_ahash_update(req);
		if (rc)
			return rc;
@@ -325,7 +325,7 @@ int gss_digest_hash_compat(struct ahash_request *req,
			   rawobj_t *hdr, int msgcnt,
			   rawobj_t *msgs,
-			   int iovcnt, lnet_kiov_t *iovs)
+			   int iovcnt, struct bio_vec *iovs)
 {
	struct scatterlist sg[1];
	struct sg_table sgt;
@@ -348,14 +348,14 @@ int gss_digest_hash_compat(struct ahash_request *req,
	}
	for (i = 0; i < iovcnt; i++) {
-		if (iovs[i].kiov_len == 0)
+		if (iovs[i].bv_len == 0)
			continue;
		sg_init_table(sg, 1);
-		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
-			    iovs[i].kiov_offset);
+		sg_set_page(&sg[0], iovs[i].bv_page, iovs[i].bv_len,
+			    iovs[i].bv_offset);
-		ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+		ahash_request_set_crypt(req, sg, NULL, iovs[i].bv_len);
		rc = crypto_ahash_update(req);
		if (rc)
			return rc;
diff --git a/lustre/ptlrpc/gss/gss_crypto.h b/lustre/ptlrpc/gss/gss_crypto.h
index 39a2b4e..8e1061b 100644
--- a/lustre/ptlrpc/gss/gss_crypto.h
+++ b/lustre/ptlrpc/gss/gss_crypto.h
@@ -24,10 +24,11 @@ void gss_teardown_sgtable(struct sg_table *sgt);
 int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
		      const void *in, void *out, size_t length);
 int gss_digest_hash(struct ahash_request *req, rawobj_t *hdr,
-		    int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs);
+		    int msgcnt, rawobj_t *msgs, int iovcnt,
+		    struct bio_vec *iovs);
 int gss_digest_hash_compat(struct ahash_request *req,
			   rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
-			   int iovcnt, lnet_kiov_t *iovs);
+			   int iovcnt, struct bio_vec *iovs);
 int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
 int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
		      int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
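Note: gss_digest_hash() above walks the bio_vec array one fragment at a time, wrapping each page in a single-entry scatterlist before feeding it to the kernel ahash API. A reduced sketch of that loop (illustrative only, not part of the patch; error handling and request setup trimmed, hash_bio_vecs is a hypothetical name):

#include <linux/scatterlist.h>
#include <linux/bvec.h>
#include <crypto/hash.h>

/* Feed every non-empty fragment to an already-initialised ahash request. */
static int hash_bio_vecs(struct ahash_request *req,
			 const struct bio_vec *iovs, int iovcnt)
{
	struct scatterlist sg[1];
	int rc, i;

	for (i = 0; i < iovcnt; i++) {
		if (iovs[i].bv_len == 0)
			continue;
		sg_init_table(sg, 1);
		sg_set_page(&sg[0], iovs[i].bv_page, iovs[i].bv_len,
			    iovs[i].bv_offset);
		ahash_request_set_crypt(req, sg, NULL, iovs[i].bv_len);
		rc = crypto_ahash_update(req);
		if (rc)
			return rc;
	}
	return 0;
}
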
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index b85afa6..d675c70 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -445,7 +445,7 @@ __s32 krb5_make_checksum(__u32 enctype,
			 struct gss_keyblock *kb,
			 struct krb5_header *khdr,
			 int msgcnt, rawobj_t *msgs,
-			 int iovcnt, lnet_kiov_t *iovs,
+			 int iovcnt, struct bio_vec *iovs,
			 rawobj_t *cksum,
			 digest_hash hash_func)
 {
@@ -581,7 +581,7 @@ __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
			   int msgcnt,
			   rawobj_t *msgs,
			   int iovcnt,
-			   lnet_kiov_t *iovs,
+			   struct bio_vec *iovs,
			   rawobj_t *token)
 {
	struct krb5_ctx *kctx = gctx->internal_ctx_id;
@@ -618,7 +618,7 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
			      int msgcnt,
			      rawobj_t *msgs,
			      int iovcnt,
-			      lnet_kiov_t *iovs,
+			      struct bio_vec *iovs,
			      rawobj_t *token)
 {
	struct krb5_ctx *kctx = gctx->internal_ctx_id;
@@ -717,19 +717,19 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
	/* encrypt clear pages */
	for (i = 0; i < desc->bd_iov_count; i++) {
		sg_init_table(&src, 1);
-		sg_set_page(&src, desc->bd_vec[i].kiov_page,
-			    (desc->bd_vec[i].kiov_len +
+		sg_set_page(&src, desc->bd_vec[i].bv_page,
+			    (desc->bd_vec[i].bv_len +
			     blocksize - 1) & (~(blocksize - 1)),
-			    desc->bd_vec[i].kiov_offset);
+			    desc->bd_vec[i].bv_offset);
		if (adj_nob)
			nob += src.length;
		sg_init_table(&dst, 1);
-		sg_set_page(&dst, desc->bd_enc_vec[i].kiov_page,
+		sg_set_page(&dst, desc->bd_enc_vec[i].bv_page,
			    src.length, src.offset);
-		desc->bd_enc_vec[i].kiov_offset = dst.offset;
-		desc->bd_enc_vec[i].kiov_len = dst.length;
+		desc->bd_enc_vec[i].bv_offset = dst.offset;
+		desc->bd_enc_vec[i].bv_len = dst.length;
		rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
						 src.length);
@@ -772,18 +772,18 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
  * desc->bd_nob_transferred is the size of cipher text received.
  * desc->bd_nob is the target size of plain text supposed to be.
  *
- * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * if adj_nob != 0, we adjust each page's bv_len to the actual
  * plain text size.
  * - for client read: we don't know data size for each page, so
- *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
+ *   bd_iov[]->bv_len is set to PAGE_SIZE, but actual data received might
  *   be smaller, so we need to adjust it according to
- *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
+ *   bd_u.bd_kiov.bd_enc_vec[]->bv_len.
  *   this means we DO NOT support the situation that server send an odd size
  *   data in a page which is not the last one.
  * - for server write: we knows exactly data size for each page being expected,
- *   thus kiov_len is accurate already, so we should not adjust it at all.
- *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
- *   round_up(bd_iov[]->kiov_len) which
+ *   thus bv_len is accurate already, so we should not adjust it at all.
+ *   and bd_u.bd_kiov.bd_enc_vec[]->bv_len should be
+ *   round_up(bd_iov[]->bv_len) which
  *   should have been done by prep_bulk().
  */
 static
@@ -842,49 +842,49 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
	for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
	     i++) {
-		if (desc->bd_enc_vec[i].kiov_offset % blocksize
+		if (desc->bd_enc_vec[i].bv_offset % blocksize
		    != 0 ||
-		    desc->bd_enc_vec[i].kiov_len % blocksize
+		    desc->bd_enc_vec[i].bv_len % blocksize
		    != 0) {
			CERROR("page %d: odd offset %u len %u, blocksize %d\n",
-			       i, desc->bd_enc_vec[i].kiov_offset,
-			       desc->bd_enc_vec[i].kiov_len,
+			       i, desc->bd_enc_vec[i].bv_offset,
+			       desc->bd_enc_vec[i].bv_len,
			       blocksize);
			return -EFAULT;
		}
		if (adj_nob) {
-			if (ct_nob + desc->bd_enc_vec[i].kiov_len >
+			if (ct_nob + desc->bd_enc_vec[i].bv_len >
			    desc->bd_nob_transferred)
-				desc->bd_enc_vec[i].kiov_len =
+				desc->bd_enc_vec[i].bv_len =
					desc->bd_nob_transferred - ct_nob;
-			desc->bd_vec[i].kiov_len =
-				desc->bd_enc_vec[i].kiov_len;
-			if (pt_nob + desc->bd_enc_vec[i].kiov_len >
+			desc->bd_vec[i].bv_len =
+				desc->bd_enc_vec[i].bv_len;
+			if (pt_nob + desc->bd_enc_vec[i].bv_len >
			    desc->bd_nob)
-				desc->bd_vec[i].kiov_len =
+				desc->bd_vec[i].bv_len =
					desc->bd_nob - pt_nob;
		} else {
			/* this should be guaranteed by LNET */
			LASSERT(ct_nob + desc->bd_enc_vec[i].
-				kiov_len <=
+				bv_len <=
				desc->bd_nob_transferred);
-			LASSERT(desc->bd_vec[i].kiov_len <=
-				desc->bd_enc_vec[i].kiov_len);
+			LASSERT(desc->bd_vec[i].bv_len <=
+				desc->bd_enc_vec[i].bv_len);
		}
-		if (desc->bd_enc_vec[i].kiov_len == 0)
+		if (desc->bd_enc_vec[i].bv_len == 0)
			continue;
		sg_init_table(&src, 1);
-		sg_set_page(&src, desc->bd_enc_vec[i].kiov_page,
-			    desc->bd_enc_vec[i].kiov_len,
-			    desc->bd_enc_vec[i].kiov_offset);
+		sg_set_page(&src, desc->bd_enc_vec[i].bv_page,
+			    desc->bd_enc_vec[i].bv_len,
+			    desc->bd_enc_vec[i].bv_offset);
		dst = src;
-		if (desc->bd_vec[i].kiov_len % blocksize == 0)
+		if (desc->bd_vec[i].bv_len % blocksize == 0)
			sg_assign_page(&dst,
-				       desc->bd_vec[i].kiov_page);
+				       desc->bd_vec[i].bv_page);
		rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
						 src.length);
@@ -893,17 +893,17 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
			return rc;
		}
-		if (desc->bd_vec[i].kiov_len % blocksize != 0) {
-			memcpy(page_address(desc->bd_vec[i].kiov_page) +
-			       desc->bd_vec[i].kiov_offset,
+		if (desc->bd_vec[i].bv_len % blocksize != 0) {
+			memcpy(page_address(desc->bd_vec[i].bv_page) +
+			       desc->bd_vec[i].bv_offset,
			       page_address(desc->bd_enc_vec[i].
-					    kiov_page) +
-			       desc->bd_vec[i].kiov_offset,
-			       desc->bd_vec[i].kiov_len);
+					    bv_page) +
+			       desc->bd_vec[i].bv_offset,
+			       desc->bd_vec[i].bv_len);
		}
-		ct_nob += desc->bd_enc_vec[i].kiov_len;
-		pt_nob += desc->bd_vec[i].kiov_len;
+		ct_nob += desc->bd_enc_vec[i].bv_len;
+		pt_nob += desc->bd_vec[i].bv_len;
	}
	if (unlikely(ct_nob != desc->bd_nob_transferred)) {
@@ -921,7 +921,7 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
	/* if needed, clear up the rest unused iovs */
	if (adj_nob)
		while (i < desc->bd_iov_count)
-			desc->bd_vec[i++].kiov_len = 0;
+			desc->bd_vec[i++].bv_len = 0;
	/* decrypt tail (krb5 header) */
	rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
@@ -1113,21 +1113,21 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
	blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
	for (i = 0; i < desc->bd_iov_count; i++) {
-		LASSERT(desc->bd_enc_vec[i].kiov_page);
+		LASSERT(desc->bd_enc_vec[i].bv_page);
		/*
		 * offset should always start at page boundary of either
		 * client or server side.
		 */
-		if (desc->bd_vec[i].kiov_offset & blocksize) {
+		if (desc->bd_vec[i].bv_offset & blocksize) {
			CERROR("odd offset %d in page %d\n",
-			       desc->bd_vec[i].kiov_offset, i);
+			       desc->bd_vec[i].bv_offset, i);
			return GSS_S_FAILURE;
		}
-		desc->bd_enc_vec[i].kiov_offset =
-			desc->bd_vec[i].kiov_offset;
-		desc->bd_enc_vec[i].kiov_len =
-			(desc->bd_vec[i].kiov_len +
+		desc->bd_enc_vec[i].bv_offset =
+			desc->bd_vec[i].bv_offset;
+		desc->bd_enc_vec[i].bv_len =
+			(desc->bd_vec[i].bv_len +
			 blocksize - 1) & (~(blocksize - 1));
	}
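Note: both prep_bulk implementations round each cipher-text fragment up to a whole number of cipher blocks while reusing the plain-text offset; that is what later allows the decrypt path to shrink bv_len back down when adj_nob is set. A minimal sketch of the rounding (illustrative only, not part of the patch; round_frag_to_blocksize is a hypothetical helper and blocksize is assumed to be a power of two, which the mask trick requires):

#include <linux/bvec.h>

/* Mirror a plain-text fragment into the encrypted vector, rounding its
 * length up to the cipher blocksize (blocksize must be a power of two). */
static void round_frag_to_blocksize(struct bio_vec *enc,
				    const struct bio_vec *clear,
				    unsigned int blocksize)
{
	enc->bv_offset = clear->bv_offset;
	enc->bv_len = (clear->bv_len + blocksize - 1) & ~(blocksize - 1);
}
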
diff --git a/lustre/ptlrpc/gss/gss_mech_switch.c b/lustre/ptlrpc/gss/gss_mech_switch.c
index 4864191..ee2b851 100644
--- a/lustre/ptlrpc/gss/gss_mech_switch.c
+++ b/lustre/ptlrpc/gss/gss_mech_switch.c
@@ -207,11 +207,11 @@ __u32 lgss_inquire_context(struct gss_ctx *context_handle,
 /* gss_get_mic: compute a mic over message and return mic_token. */
 __u32 lgss_get_mic(struct gss_ctx *context_handle,
-                   int msgcnt,
-                   rawobj_t *msg,
-                   int iovcnt,
-                   lnet_kiov_t *iovs,
-                   rawobj_t *mic_token)
+		   int msgcnt,
+		   rawobj_t *msg,
+		   int iovcnt,
+		   struct bio_vec *iovs,
+		   rawobj_t *mic_token)
 {
	LASSERT(context_handle);
	LASSERT(context_handle->mech_type);
@@ -229,11 +229,11 @@ __u32 lgss_get_mic(struct gss_ctx *context_handle,
 /* gss_verify_mic: check whether the provided mic_token verifies message.
  */
 __u32 lgss_verify_mic(struct gss_ctx *context_handle,
-                      int msgcnt,
-                      rawobj_t *msg,
-                      int iovcnt,
-                      lnet_kiov_t *iovs,
-                      rawobj_t *mic_token)
+		      int msgcnt,
+		      rawobj_t *msg,
+		      int iovcnt,
+		      struct bio_vec *iovs,
+		      rawobj_t *mic_token)
 {
	LASSERT(context_handle);
	LASSERT(context_handle->mech_type);
diff --git a/lustre/ptlrpc/gss/gss_null_mech.c b/lustre/ptlrpc/gss/gss_null_mech.c
index 1e946f8..142438a 100644
--- a/lustre/ptlrpc/gss/gss_null_mech.c
+++ b/lustre/ptlrpc/gss/gss_null_mech.c
@@ -152,7 +152,7 @@ int gss_display_null(struct gss_ctx *gss_context, char *buf, int bufsize)
 static
 __u32 gss_get_mic_null(struct gss_ctx *gss_context, int message_count,
-		       rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
+		       rawobj_t *messages, int iov_count, struct bio_vec *iovs,
		       rawobj_t *token)
 {
	return GSS_S_COMPLETE;
@@ -160,7 +160,8 @@ __u32 gss_get_mic_null(struct gss_ctx *gss_context, int message_count,
 static
 __u32 gss_verify_mic_null(struct gss_ctx *gss_context, int message_count,
-			  rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
+			  rawobj_t *messages, int iov_count,
+			  struct bio_vec *iovs,
			  rawobj_t *token)
 {
	return GSS_S_COMPLETE;
diff --git a/lustre/ptlrpc/gss/gss_sk_mech.c b/lustre/ptlrpc/gss/gss_sk_mech.c
index 8cdd1f7..dbed77c 100644
--- a/lustre/ptlrpc/gss/gss_sk_mech.c
+++ b/lustre/ptlrpc/gss/gss_sk_mech.c
@@ -315,7 +315,7 @@ __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
 static
 u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
-		 rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
+		 rawobj_t *msgs, int iov_count, struct bio_vec *iovs,
		 rawobj_t *token, digest_hash hash_func)
 {
	struct ahash_request *req;
@@ -347,7 +347,7 @@ __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
		     int message_count,
		     rawobj_t *messages,
		     int iov_count,
-		     lnet_kiov_t *iovs,
+		     struct bio_vec *iovs,
		     rawobj_t *token)
 {
	struct sk_ctx *skc = gss_context->internal_ctx_id;
@@ -360,7 +360,7 @@ __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
 static
 u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
		   int message_count, rawobj_t *messages,
-		   int iov_count, lnet_kiov_t *iovs,
+		   int iov_count, struct bio_vec *iovs,
		   rawobj_t *token, digest_hash hash_func)
 {
	rawobj_t checksum = RAWOBJ_EMPTY;
@@ -404,7 +404,7 @@ cleanup:
 static
 u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
			int msgcnt, rawobj_t *msgs, int iovcnt,
-			lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
+			struct bio_vec *iovs, int iov_bytes, rawobj_t *token)
 {
	rawobj_t checksum = RAWOBJ_EMPTY;
	struct ahash_request *req;
@@ -450,15 +450,15 @@ u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
	}
	for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
-		if (iovs[i].kiov_len == 0)
+		if (iovs[i].bv_len == 0)
			continue;
-		bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
+		bytes = min_t(int, iov_bytes, iovs[i].bv_len);
		iov_bytes -= bytes;
		sg_init_table(sg, 1);
-		sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
-			    iovs[i].kiov_offset);
+		sg_set_page(&sg[0], iovs[i].bv_page, bytes,
+			    iovs[i].bv_offset);
		ahash_request_set_crypt(req, sg, NULL, bytes);
		rc = crypto_ahash_update(req);
		if (rc)
@@ -486,7 +486,7 @@ __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
			int message_count,
			rawobj_t *messages,
			int iov_count,
-			lnet_kiov_t *iovs,
+			struct bio_vec *iovs,
			rawobj_t *token)
 {
	struct sk_ctx *skc = gss_context->internal_ctx_id;
@@ -612,16 +612,16 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
	for (i = 0; i < desc->bd_iov_count; i++) {
-		if (desc->bd_vec[i].kiov_offset & blocksize) {
+		if (desc->bd_vec[i].bv_offset & blocksize) {
			CERROR("offset %d not blocksize aligned\n",
-			       desc->bd_vec[i].kiov_offset);
+			       desc->bd_vec[i].bv_offset);
			return GSS_S_FAILURE;
		}
-		desc->bd_enc_vec[i].kiov_offset =
-			desc->bd_vec[i].kiov_offset;
-		desc->bd_enc_vec[i].kiov_len =
-			sk_block_mask(desc->bd_vec[i].kiov_len, blocksize);
+		desc->bd_enc_vec[i].bv_offset =
+			desc->bd_vec[i].bv_offset;
+		desc->bd_enc_vec[i].bv_len =
+			sk_block_mask(desc->bd_vec[i].bv_len, blocksize);
	}
	return GSS_S_COMPLETE;
@@ -649,17 +649,17 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
	sg_init_table(&ctxt, 1);
	for (i = 0; i < desc->bd_iov_count; i++) {
-		sg_set_page(&ptxt, desc->bd_vec[i].kiov_page,
-			    sk_block_mask(desc->bd_vec[i].kiov_len, blocksize),
-			    desc->bd_vec[i].kiov_offset);
+		sg_set_page(&ptxt, desc->bd_vec[i].bv_page,
+			    sk_block_mask(desc->bd_vec[i].bv_len, blocksize),
+			    desc->bd_vec[i].bv_offset);
		nob += ptxt.length;
-		sg_set_page(&ctxt, desc->bd_enc_vec[i].kiov_page,
+		sg_set_page(&ctxt, desc->bd_enc_vec[i].bv_page,
			    ptxt.length, ptxt.offset);
-		desc->bd_enc_vec[i].kiov_offset = ctxt.offset;
-		desc->bd_enc_vec[i].kiov_len = ctxt.length;
+		desc->bd_enc_vec[i].bv_offset = ctxt.offset;
+		desc->bd_enc_vec[i].bv_len = ctxt.length;
		rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
						 ptxt.length);
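Note: the sk_decrypt_bulk() hunk that follows decrypts into the cipher-text page whenever the plain-text length is not block-aligned, and then copies only the plain-text bytes back into the destination fragment. A condensed sketch of that copy-back step (illustrative only, not part of the patch; copy_back_plaintext is a hypothetical name and the pages are assumed to be kernel-mapped, as in the code below which uses page_address() directly):

#include <linux/bvec.h>
#include <linux/mm.h>		/* page_address() */
#include <linux/string.h>	/* memcpy() */

/* After an in-place decrypt into the cipher-text page, copy just the
 * plain-text bytes back to the destination fragment. */
static void copy_back_plaintext(const struct bio_vec *plain,
				const struct bio_vec *cipher)
{
	memcpy(page_address(plain->bv_page) + plain->bv_offset,
	       page_address(cipher->bv_page) + cipher->bv_offset,
	       plain->bv_len);
}
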
@@ -704,11 +704,11 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
	for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
	     i++) {
-		lnet_kiov_t *piov = &desc->bd_vec[i];
-		lnet_kiov_t *ciov = &desc->bd_enc_vec[i];
+		struct bio_vec *piov = &desc->bd_vec[i];
+		struct bio_vec *ciov = &desc->bd_enc_vec[i];
-		if (ciov->kiov_offset % blocksize != 0 ||
-		    ciov->kiov_len % blocksize != 0) {
+		if (ciov->bv_offset % blocksize != 0 ||
+		    ciov->bv_len % blocksize != 0) {
			CERROR("Invalid bulk descriptor vector\n");
			return GSS_S_DEFECTIVE_TOKEN;
		}
@@ -718,38 +718,38 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
		 * integrity only mode */
		if (adj_nob) {
			/* cipher text must not exceed transferred size */
-			if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
-				ciov->kiov_len =
+			if (ciov->bv_len + cnob > desc->bd_nob_transferred)
+				ciov->bv_len =
					desc->bd_nob_transferred - cnob;
-			piov->kiov_len = ciov->kiov_len;
+			piov->bv_len = ciov->bv_len;
			/* plain text must not exceed bulk's size */
-			if (ciov->kiov_len + pnob > desc->bd_nob)
-				piov->kiov_len = desc->bd_nob - pnob;
+			if (ciov->bv_len + pnob > desc->bd_nob)
+				piov->bv_len = desc->bd_nob - pnob;
		} else {
			/* Taken from krb5_decrypt since it was not verified
			 * whether or not LNET guarantees these */
-			if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
-			    piov->kiov_len > ciov->kiov_len) {
+			if (ciov->bv_len + cnob > desc->bd_nob_transferred ||
+			    piov->bv_len > ciov->bv_len) {
				CERROR("Invalid decrypted length\n");
				return GSS_S_FAILURE;
			}
		}
-		if (ciov->kiov_len == 0)
+		if (ciov->bv_len == 0)
			continue;
		sg_init_table(&ctxt, 1);
-		sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
-			    ciov->kiov_offset);
+		sg_set_page(&ctxt, ciov->bv_page, ciov->bv_len,
+			    ciov->bv_offset);
		ptxt = ctxt;
		/* In the event the plain text size is not a multiple
		 * of blocksize we decrypt in place and copy the result
		 * after the decryption */
-		if (piov->kiov_len % blocksize == 0)
-			sg_assign_page(&ptxt, piov->kiov_page);
+		if (piov->bv_len % blocksize == 0)
+			sg_assign_page(&ptxt, piov->bv_page);
		rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
						 ctxt.length);
@@ -758,22 +758,22 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
			return GSS_S_FAILURE;
		}
-		if (piov->kiov_len % blocksize != 0) {
-			memcpy(page_address(piov->kiov_page) +
-			       piov->kiov_offset,
-			       page_address(ciov->kiov_page) +
-			       ciov->kiov_offset,
-			       piov->kiov_len);
+		if (piov->bv_len % blocksize != 0) {
+			memcpy(page_address(piov->bv_page) +
+			       piov->bv_offset,
+			       page_address(ciov->bv_page) +
+			       ciov->bv_offset,
+			       piov->bv_len);
		}
-		cnob += ciov->kiov_len;
-		pnob += piov->kiov_len;
+		cnob += ciov->bv_len;
+		pnob += piov->bv_len;
	}
	/* if needed, clear up the rest unused iovs */
	if (adj_nob)
		while (i < desc->bd_iov_count)
-			desc->bd_vec[i++].kiov_len = 0;
+			desc->bd_vec[i++].bv_len = 0;
	if (unlikely(cnob != desc->bd_nob_transferred)) {
		CERROR("%d cipher text transferred but only %d decrypted\n",
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index 33b9a09..66032b1 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -632,7 +632,7 @@ again:
	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
-		desc->bd_enc_vec[i].kiov_page =
+		desc->bd_enc_vec[i].bv_page =
			page_pools.epp_pools[p_idx][g_idx];
		page_pools.epp_pools[p_idx][g_idx] = NULL;
@@ -683,12 +683,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
	LASSERT(page_pools.epp_pools[p_idx]);
	for (i = 0; i < desc->bd_iov_count; i++) {
-		LASSERT(desc->bd_enc_vec[i].kiov_page != NULL);
+		LASSERT(desc->bd_enc_vec[i].bv_page);
		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
		LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
		page_pools.epp_pools[p_idx][g_idx] =
-			desc->bd_enc_vec[i].kiov_page;
+			desc->bd_enc_vec[i].bv_page;
		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
@@ -924,10 +924,10 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
	for (i = 0; i < desc->bd_iov_count; i++) {
		cfs_crypto_hash_update_page(req,
-					    desc->bd_vec[i].kiov_page,
-					    desc->bd_vec[i].kiov_offset &
+					    desc->bd_vec[i].bv_page,
+					    desc->bd_vec[i].bv_offset &
					    ~PAGE_MASK,
-					    desc->bd_vec[i].kiov_len);
+					    desc->bd_vec[i].bv_len);
	}
	if (hashsize > buflen) {
diff --git a/lustre/ptlrpc/sec_plain.c b/lustre/ptlrpc/sec_plain.c
index 881add9..ea9a469 100644
--- a/lustre/ptlrpc/sec_plain.c
+++ b/lustre/ptlrpc/sec_plain.c
@@ -156,13 +156,13 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
	unsigned int off, i;
	for (i = 0; i < desc->bd_iov_count; i++) {
-		if (desc->bd_vec[i].kiov_len == 0)
+		if (desc->bd_vec[i].bv_len == 0)
			continue;
-		ptr = kmap(desc->bd_vec[i].kiov_page);
-		off = desc->bd_vec[i].kiov_offset & ~PAGE_MASK;
+		ptr = kmap(desc->bd_vec[i].bv_page);
+		off = desc->bd_vec[i].bv_offset & ~PAGE_MASK;
		ptr[off] ^= 0x1;
-		kunmap(desc->bd_vec[i].kiov_page);
+		kunmap(desc->bd_vec[i].bv_page);
		return;
	}
 }
@@ -355,12 +355,12 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
		/* fix the actual data size */
		for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-			if (desc->bd_vec[i].kiov_len +
+			if (desc->bd_vec[i].bv_len +
			    nob > desc->bd_nob_transferred) {
-				desc->bd_vec[i].kiov_len =
+				desc->bd_vec[i].bv_len =
					desc->bd_nob_transferred - nob;
			}
-			nob += desc->bd_vec[i].kiov_len;
+			nob += desc->bd_vec[i].bv_len;
		}
		rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,