LU-13004 ptlrpc: simplify bd_vec access. 73/36973/4
author Mr NeilBrown <neilb@suse.de>
Wed, 4 Dec 2019 02:38:05 +0000 (13:38 +1100)
committer Oleg Drokin <green@whamcloud.com>
Thu, 5 Mar 2020 22:36:32 +0000 (22:36 +0000)
Now that there are no kvecs in ptlrpc_bulk_desc, only kiov arrays
(bd_vec and bd_enc_vec), we can simplify the access: discard the
containing struct and the accessor macros, and access the fields
directly.

Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I068a7a280f130bf0b53b9c572ed47ef0cc999102
Reviewed-on: https://review.whamcloud.com/36973
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/lustre_net.h
lustre/osc/osc_page.c
lustre/ptlrpc/client.c
lustre/ptlrpc/gss/gss_bulk.c
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_sk_mech.c
lustre/ptlrpc/pers.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/sec_plain.c

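For orientation before the per-file diffs, a minimal before/after sketch of the access pattern this patch replaces. This is illustrative only, not part of the patch; it assumes a valid struct ptlrpc_bulk_desc *desc and an in-range index i.

        /* Before: the kiov arrays lived in the unnamed struct member bd_kiov
         * and were reached through accessor macros: */
        struct page *old_pg = BD_GET_KIOV(desc, i).kiov_page;
        /* ... which the macro expanded to desc->bd_kiov.bd_vec[i].kiov_page */

        /* After: bd_vec and bd_enc_vec are direct members of
         * struct ptlrpc_bulk_desc and are indexed like any array: */
        struct page *new_pg = desc->bd_vec[i].kiov_page;

Every hunk below is an instance of this mechanical substitution; the only structural change is the removal of the wrapper struct and the macros in lustre_net.h.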
diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index a8420ba..678e482 100644
@@ -1447,20 +1447,11 @@ struct ptlrpc_bulk_desc {
        /** array of associated MDs */
        struct lnet_handle_md   bd_mds[PTLRPC_BULK_OPS_COUNT];
 
-       struct {
-               /*
-                * encrypt iov, size is either 0 or bd_iov_count.
-                */
-               lnet_kiov_t *bd_enc_vec;
-               lnet_kiov_t *bd_vec;
-       } bd_kiov;
+       /* encrypted iov, size is either 0 or bd_iov_count. */
+       lnet_kiov_t *bd_enc_vec;
+       lnet_kiov_t *bd_vec;
 };
 
-#define GET_KIOV(desc)                 ((desc)->bd_kiov.bd_vec)
-#define BD_GET_KIOV(desc, i)           ((desc)->bd_kiov.bd_vec[i])
-#define GET_ENC_KIOV(desc)             ((desc)->bd_kiov.bd_enc_vec)
-#define BD_GET_ENC_KIOV(desc, i)       ((desc)->bd_kiov.bd_enc_vec[i])
-
 enum {
        SVC_INIT        = 0,
        SVC_STOPPED     = 1 << 0,
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index 9dbad53..d0fd5e2 100644
@@ -893,7 +893,7 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
        for (i = 0; i < page_count; i++) {
                void *pz;
                if (desc)
-                       pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+                       pz = page_zone(desc->bd_vec[i].kiov_page);
                else
                        pz = page_zone(aa->aa_ppga[i]->pg);
 
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index 740019a..ba4c079 100644
@@ -66,7 +66,7 @@ static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
        int i;
 
        for (i = 0; i < desc->bd_iov_count ; i++)
-               put_page(BD_GET_KIOV(desc, i).kiov_page);
+               put_page(desc->bd_vec[i].kiov_page);
 }
 
 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
@@ -173,9 +173,9 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
        if (!desc)
                return NULL;
 
-       OBD_ALLOC_LARGE(GET_KIOV(desc),
-                       nfrags * sizeof(*GET_KIOV(desc)));
-       if (!GET_KIOV(desc))
+       OBD_ALLOC_LARGE(desc->bd_vec,
+                       nfrags * sizeof(*desc->bd_vec));
+       if (!desc->bd_vec)
                goto out;
 
        spin_lock_init(&desc->bd_lock);
@@ -251,7 +251,7 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= PAGE_SIZE);
 
-       kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
+       kiov = &desc->bd_vec[desc->bd_iov_count];
 
        desc->bd_nob += len;
 
@@ -286,8 +286,8 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
        if (desc->bd_frag_ops->release_frags != NULL)
                desc->bd_frag_ops->release_frags(desc);
 
-       OBD_FREE_LARGE(GET_KIOV(desc),
-                      desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
+       OBD_FREE_LARGE(desc->bd_vec,
+                      desc->bd_max_iov * sizeof(*desc->bd_vec));
        OBD_FREE_PTR(desc);
        EXIT;
 }
diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c
index b418ea7..59fbd21 100644
@@ -126,7 +126,7 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
                        maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
                                           desc->bd_iov_count,
-                                          GET_KIOV(desc),
+                                          desc->bd_vec,
                                           &token);
                        if (maj != GSS_S_COMPLETE) {
                                CWARN("failed to sign bulk data: %x\n", maj);
@@ -251,12 +251,12 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
                        /* fix the actual data size */
                        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-                               if (BD_GET_KIOV(desc, i).kiov_len + nob >
+                               if (desc->bd_vec[i].kiov_len + nob >
                                    desc->bd_nob_transferred) {
-                                       BD_GET_KIOV(desc, i).kiov_len =
+                                       desc->bd_vec[i].kiov_len =
                                                desc->bd_nob_transferred - nob;
                                }
-                               nob += BD_GET_KIOV(desc, i).kiov_len;
+                               nob += desc->bd_vec[i].kiov_len;
                        }
 
                        token.data = bsdv->bsd_data;
@@ -265,7 +265,7 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
                        maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
                                              desc->bd_iov_count,
-                                             GET_KIOV(desc),
+                                             desc->bd_vec,
                                              &token);
                         if (maj != GSS_S_COMPLETE) {
                                 CERROR("failed to verify bulk read: %x\n", maj);
@@ -400,7 +400,7 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
 
                maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
                                      desc->bd_iov_count,
-                                     GET_KIOV(desc), &token);
+                                     desc->bd_vec, &token);
                 if (maj != GSS_S_COMPLETE) {
                         bsdv->bsd_flags |= BSD_FL_ERR;
                         CERROR("failed to verify bulk signature: %x\n", maj);
@@ -477,7 +477,7 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
 
                maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
                                   desc->bd_iov_count,
-                                  GET_KIOV(desc), &token);
+                                  desc->bd_vec, &token);
                if (maj != GSS_S_COMPLETE) {
                         bsdv->bsd_flags |= BSD_FL_ERR;
                         CERROR("failed to sign bulk data: %x\n", maj);
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 5ff7b3b..b85afa6 100644
@@ -682,7 +682,7 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         int                     blocksize, i, rc, nob = 0;
 
         LASSERT(desc->bd_iov_count);
-       LASSERT(GET_ENC_KIOV(desc));
+       LASSERT(desc->bd_enc_vec);
 
        blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
@@ -717,19 +717,19 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
-               sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
-                           (BD_GET_KIOV(desc, i).kiov_len +
+               sg_set_page(&src, desc->bd_vec[i].kiov_page,
+                           (desc->bd_vec[i].kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
-                           BD_GET_KIOV(desc, i).kiov_offset);
+                           desc->bd_vec[i].kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
-               sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+               sg_set_page(&dst, desc->bd_enc_vec[i].kiov_page,
                            src.length, src.offset);
 
-               BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
-               BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
+               desc->bd_enc_vec[i].kiov_offset = dst.offset;
+               desc->bd_enc_vec[i].kiov_len = dst.length;
 
                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
@@ -802,7 +802,7 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
         int                     blocksize, i, rc;
 
         LASSERT(desc->bd_iov_count);
-       LASSERT(GET_ENC_KIOV(desc));
+       LASSERT(desc->bd_enc_vec);
         LASSERT(desc->bd_nob_transferred);
 
        blocksize = crypto_blkcipher_blocksize(tfm);
@@ -842,49 +842,49 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 
        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
-               if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
+               if (desc->bd_enc_vec[i].kiov_offset % blocksize
                    != 0 ||
-                   BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
+                   desc->bd_enc_vec[i].kiov_len % blocksize
                    != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
-                              i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
-                              BD_GET_ENC_KIOV(desc, i).kiov_len,
+                              i, desc->bd_enc_vec[i].kiov_offset,
+                              desc->bd_enc_vec[i].kiov_len,
                               blocksize);
                        return -EFAULT;
                }
 
                if (adj_nob) {
-                       if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                       if (ct_nob + desc->bd_enc_vec[i].kiov_len >
                            desc->bd_nob_transferred)
-                               BD_GET_ENC_KIOV(desc, i).kiov_len =
+                               desc->bd_enc_vec[i].kiov_len =
                                        desc->bd_nob_transferred - ct_nob;
 
-                       BD_GET_KIOV(desc, i).kiov_len =
-                         BD_GET_ENC_KIOV(desc, i).kiov_len;
-                       if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                       desc->bd_vec[i].kiov_len =
+                         desc->bd_enc_vec[i].kiov_len;
+                       if (pt_nob + desc->bd_enc_vec[i].kiov_len >
                            desc->bd_nob)
-                               BD_GET_KIOV(desc, i).kiov_len =
+                               desc->bd_vec[i].kiov_len =
                                  desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
-                       LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
+                       LASSERT(ct_nob + desc->bd_enc_vec[i].
                                kiov_len <=
                                desc->bd_nob_transferred);
-                       LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
-                               BD_GET_ENC_KIOV(desc, i).kiov_len);
+                       LASSERT(desc->bd_vec[i].kiov_len <=
+                               desc->bd_enc_vec[i].kiov_len);
                }
 
-               if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+               if (desc->bd_enc_vec[i].kiov_len == 0)
                        continue;
 
                sg_init_table(&src, 1);
-               sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
-                           BD_GET_ENC_KIOV(desc, i).kiov_len,
-                           BD_GET_ENC_KIOV(desc, i).kiov_offset);
+               sg_set_page(&src, desc->bd_enc_vec[i].kiov_page,
+                           desc->bd_enc_vec[i].kiov_len,
+                           desc->bd_enc_vec[i].kiov_offset);
                dst = src;
-               if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+               if (desc->bd_vec[i].kiov_len % blocksize == 0)
                        sg_assign_page(&dst,
-                                      BD_GET_KIOV(desc, i).kiov_page);
+                                      desc->bd_vec[i].kiov_page);
 
                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
@@ -893,17 +893,17 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                         return rc;
                 }
 
-               if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
-                       memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
-                              BD_GET_KIOV(desc, i).kiov_offset,
-                              page_address(BD_GET_ENC_KIOV(desc, i).
+               if (desc->bd_vec[i].kiov_len % blocksize != 0) {
+                       memcpy(page_address(desc->bd_vec[i].kiov_page) +
+                              desc->bd_vec[i].kiov_offset,
+                              page_address(desc->bd_enc_vec[i].
                                            kiov_page) +
-                              BD_GET_KIOV(desc, i).kiov_offset,
-                              BD_GET_KIOV(desc, i).kiov_len);
+                              desc->bd_vec[i].kiov_offset,
+                              desc->bd_vec[i].kiov_len);
                }
 
-               ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
-               pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+               ct_nob += desc->bd_enc_vec[i].kiov_len;
+               pt_nob += desc->bd_vec[i].kiov_len;
        }
 
         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
@@ -921,7 +921,7 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
-                       BD_GET_KIOV(desc, i++).kiov_len = 0;
+                       desc->bd_vec[i++].kiov_len = 0;
 
         /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
@@ -1107,27 +1107,27 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
        int                  blocksize, i;
 
        LASSERT(desc->bd_iov_count);
-       LASSERT(GET_ENC_KIOV(desc));
+       LASSERT(desc->bd_enc_vec);
        LASSERT(kctx->kc_keye.kb_tfm);
 
        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+               LASSERT(desc->bd_enc_vec[i].kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.
                 */
-               if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+               if (desc->bd_vec[i].kiov_offset & blocksize) {
                        CERROR("odd offset %d in page %d\n",
-                              BD_GET_KIOV(desc, i).kiov_offset, i);
+                              desc->bd_vec[i].kiov_offset, i);
                        return GSS_S_FAILURE;
                }
 
-               BD_GET_ENC_KIOV(desc, i).kiov_offset =
-                       BD_GET_KIOV(desc, i).kiov_offset;
-               BD_GET_ENC_KIOV(desc, i).kiov_len =
-                       (BD_GET_KIOV(desc, i).kiov_len +
+               desc->bd_enc_vec[i].kiov_offset =
+                       desc->bd_vec[i].kiov_offset;
+               desc->bd_enc_vec[i].kiov_len =
+                       (desc->bd_vec[i].kiov_len +
                         blocksize - 1) & (~(blocksize - 1));
        }
 
@@ -1198,7 +1198,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
-                              desc->bd_iov_count, GET_KIOV(desc),
+                              desc->bd_iov_count, desc->bd_vec,
                               &cksum, gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);
@@ -1490,7 +1490,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
-                              GET_KIOV(desc),
+                              desc->bd_vec,
                               &cksum, gctx->hash_func))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);
diff --git a/lustre/ptlrpc/gss/gss_sk_mech.c b/lustre/ptlrpc/gss/gss_sk_mech.c
index 862ab89..8cdd1f7 100644
@@ -612,16 +612,16 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+               if (desc->bd_vec[i].kiov_offset & blocksize) {
                        CERROR("offset %d not blocksize aligned\n",
-                              BD_GET_KIOV(desc, i).kiov_offset);
+                              desc->bd_vec[i].kiov_offset);
                        return GSS_S_FAILURE;
                }
 
-               BD_GET_ENC_KIOV(desc, i).kiov_offset =
-                       BD_GET_KIOV(desc, i).kiov_offset;
-               BD_GET_ENC_KIOV(desc, i).kiov_len =
-                       sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
+               desc->bd_enc_vec[i].kiov_offset =
+                       desc->bd_vec[i].kiov_offset;
+               desc->bd_enc_vec[i].kiov_len =
+                       sk_block_mask(desc->bd_vec[i].kiov_len, blocksize);
        }
 
        return GSS_S_COMPLETE;
@@ -649,17 +649,17 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
        sg_init_table(&ctxt, 1);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
-                           sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
+               sg_set_page(&ptxt, desc->bd_vec[i].kiov_page,
+                           sk_block_mask(desc->bd_vec[i].kiov_len,
                                          blocksize),
-                           BD_GET_KIOV(desc, i).kiov_offset);
+                           desc->bd_vec[i].kiov_offset);
                nob += ptxt.length;
 
-               sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
+               sg_set_page(&ctxt, desc->bd_enc_vec[i].kiov_page,
                            ptxt.length, ptxt.offset);
 
-               BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
-               BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;
+               desc->bd_enc_vec[i].kiov_offset = ctxt.offset;
+               desc->bd_enc_vec[i].kiov_len = ctxt.length;
 
                rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
                                                 ptxt.length);
@@ -704,8 +704,8 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
 
        for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
             i++) {
-               lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
-               lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);
+               lnet_kiov_t *piov = &desc->bd_vec[i];
+               lnet_kiov_t *ciov = &desc->bd_enc_vec[i];
 
                if (ciov->kiov_offset % blocksize != 0 ||
                    ciov->kiov_len % blocksize != 0) {
@@ -773,7 +773,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
-                       BD_GET_KIOV(desc, i++).kiov_len = 0;
+                       desc->bd_vec[i++].kiov_len = 0;
 
        if (unlikely(cnob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
@@ -821,7 +821,7 @@ __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;
        if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
-                        desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac,
+                        desc->bd_iov_count, desc->bd_enc_vec, &skw.skw_hmac,
                         gss_context->hash_func))
                return GSS_S_FAILURE;
 
@@ -859,7 +859,7 @@ __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
 
        rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
                                 &skw.skw_cipher, desc->bd_iov_count,
-                                GET_ENC_KIOV(desc), desc->bd_nob,
+                                desc->bd_enc_vec, desc->bd_nob,
                                 &skw.skw_hmac);
        if (rc)
                return rc;
diff --git a/lustre/ptlrpc/pers.c b/lustre/ptlrpc/pers.c
index 18fb720..4566d88 100644
@@ -55,11 +55,11 @@ void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
        md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
 
        md->options |= LNET_MD_KIOV;
-       if (GET_ENC_KIOV(desc))
-               md->start = &BD_GET_ENC_KIOV(desc, mdidx *
-                                            LNET_MAX_IOV);
+       if (desc->bd_enc_vec)
+               md->start = &desc->bd_enc_vec[mdidx *
+                                             LNET_MAX_IOV];
        else
-               md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
+               md->start = &desc->bd_vec[mdidx * LNET_MAX_IOV];
 }
 
 
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index 6c5d64f..33b9a09 100644
@@ -542,12 +542,12 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
 
        /* resent bulk, enc iov might have been allocated previously */
-       if (GET_ENC_KIOV(desc) != NULL)
+       if (desc->bd_enc_vec != NULL)
                return 0;
 
-       OBD_ALLOC_LARGE(GET_ENC_KIOV(desc),
-                 desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
-       if (GET_ENC_KIOV(desc) == NULL)
+       OBD_ALLOC_LARGE(desc->bd_enc_vec,
+                 desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
+       if (desc->bd_enc_vec == NULL)
                return -ENOMEM;
 
        spin_lock(&page_pools.epp_lock);
@@ -601,10 +601,10 @@ again:
                                 */
                                page_pools.epp_st_outofmem++;
                                spin_unlock(&page_pools.epp_lock);
-                               OBD_FREE_LARGE(GET_ENC_KIOV(desc),
+                               OBD_FREE_LARGE(desc->bd_enc_vec,
                                               desc->bd_iov_count *
-                                               sizeof(*GET_ENC_KIOV(desc)));
-                               GET_ENC_KIOV(desc) = NULL;
+                                               sizeof(*desc->bd_enc_vec));
+                               desc->bd_enc_vec = NULL;
                                return -ENOMEM;
                        }
                }
@@ -632,7 +632,7 @@ again:
 
        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
-               BD_GET_ENC_KIOV(desc, i).kiov_page =
+               desc->bd_enc_vec[i].kiov_page =
                       page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;
 
@@ -668,7 +668,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
        int p_idx, g_idx;
        int i;
 
-       if (GET_ENC_KIOV(desc) == NULL)
+       if (desc->bd_enc_vec == NULL)
                return;
 
        LASSERT(desc->bd_iov_count > 0);
@@ -683,12 +683,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
        LASSERT(page_pools.epp_pools[p_idx]);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page != NULL);
+               LASSERT(desc->bd_enc_vec[i].kiov_page != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
 
                page_pools.epp_pools[p_idx][g_idx] =
-                       BD_GET_ENC_KIOV(desc, i).kiov_page;
+                       desc->bd_enc_vec[i].kiov_page;
 
                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
@@ -702,9 +702,9 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 
        spin_unlock(&page_pools.epp_lock);
 
-       OBD_FREE_LARGE(GET_ENC_KIOV(desc),
-                desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
-       GET_ENC_KIOV(desc) = NULL;
+       OBD_FREE_LARGE(desc->bd_enc_vec,
+                desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
+       desc->bd_enc_vec = NULL;
 }
 
 /*
@@ -924,10 +924,10 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 
        for (i = 0; i < desc->bd_iov_count; i++) {
                cfs_crypto_hash_update_page(req,
-                                 BD_GET_KIOV(desc, i).kiov_page,
-                                 BD_GET_KIOV(desc, i).kiov_offset &
+                                 desc->bd_vec[i].kiov_page,
+                                 desc->bd_vec[i].kiov_offset &
                                              ~PAGE_MASK,
-                                 BD_GET_KIOV(desc, i).kiov_len);
+                                 desc->bd_vec[i].kiov_len);
        }
 
        if (hashsize > buflen) {
diff --git a/lustre/ptlrpc/sec_plain.c b/lustre/ptlrpc/sec_plain.c
index 82310b6..881add9 100644
@@ -156,13 +156,13 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
        unsigned int off, i;
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               if (BD_GET_KIOV(desc, i).kiov_len == 0)
+               if (desc->bd_vec[i].kiov_len == 0)
                        continue;
 
-               ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
-               off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
+               ptr = kmap(desc->bd_vec[i].kiov_page);
+               off = desc->bd_vec[i].kiov_offset & ~PAGE_MASK;
                ptr[off] ^= 0x1;
-               kunmap(BD_GET_KIOV(desc, i).kiov_page);
+               kunmap(desc->bd_vec[i].kiov_page);
                return;
        }
 }
@@ -355,12 +355,12 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
        /* fix the actual data size */
        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-               if (BD_GET_KIOV(desc, i).kiov_len +
+               if (desc->bd_vec[i].kiov_len +
                    nob > desc->bd_nob_transferred) {
-                       BD_GET_KIOV(desc, i).kiov_len =
+                       desc->bd_vec[i].kiov_len =
                                desc->bd_nob_transferred - nob;
                }
-               nob += BD_GET_KIOV(desc, i).kiov_len;
+               nob += desc->bd_vec[i].kiov_len;
        }
 
        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,