From 816c9f01d5b44e05437b890aab4ef50edc02230f Mon Sep 17 00:00:00 2001
From: Mr NeilBrown
Date: Wed, 4 Dec 2019 13:38:05 +1100
Subject: [PATCH] LU-13004 ptlrpc: simplify bd_vec access.

Now that there are no kvecs in ptlrpc_bulk_desc, only bdvecs, we can
simplify the access, discarding the containing struct and the macros,
and just accessing the fields directly.

Signed-off-by: Mr NeilBrown
Change-Id: I068a7a280f130bf0b53b9c572ed47ef0cc999102
Reviewed-on: https://review.whamcloud.com/36973
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Shaun Tancheff
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre_net.h       | 15 ++-----
 lustre/osc/osc_page.c             |  2 +-
 lustre/ptlrpc/client.c            | 14 +++---
 lustre/ptlrpc/gss/gss_bulk.c      | 14 +++---
 lustre/ptlrpc/gss/gss_krb5_mech.c | 92 +++++++++++++++++++--------------------
 lustre/ptlrpc/gss/gss_sk_mech.c   | 34 +++++++--------
 lustre/ptlrpc/pers.c              |  8 ++--
 lustre/ptlrpc/sec_bulk.c          | 34 +++++++--------
 lustre/ptlrpc/sec_plain.c         | 14 +++---
 9 files changed, 109 insertions(+), 118 deletions(-)

diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index a8420ba..678e482 100644
--- a/lustre/include/lustre_net.h
+++ b/lustre/include/lustre_net.h
@@ -1447,20 +1447,11 @@ struct ptlrpc_bulk_desc {
 	/** array of associated MDs */
 	struct lnet_handle_md	bd_mds[PTLRPC_BULK_OPS_COUNT];
 
-	struct {
-		/*
-		 * encrypt iov, size is either 0 or bd_iov_count.
-		 */
-		lnet_kiov_t *bd_enc_vec;
-		lnet_kiov_t *bd_vec;
-	} bd_kiov;
+	/* encrypted iov, size is either 0 or bd_iov_count. */
+	lnet_kiov_t *bd_enc_vec;
+	lnet_kiov_t *bd_vec;
 };
 
-#define GET_KIOV(desc) ((desc)->bd_kiov.bd_vec)
-#define BD_GET_KIOV(desc, i) ((desc)->bd_kiov.bd_vec[i])
-#define GET_ENC_KIOV(desc) ((desc)->bd_kiov.bd_enc_vec)
-#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_kiov.bd_enc_vec[i])
-
 enum {
 	SVC_INIT	= 0,
 	SVC_STOPPED	= 1 << 0,
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index 9dbad53..d0fd5e2 100644
--- a/lustre/osc/osc_page.c
+++ b/lustre/osc/osc_page.c
@@ -893,7 +893,7 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
 	for (i = 0; i < page_count; i++) {
 		void *pz;
 
 		if (desc)
-			pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+			pz = page_zone(desc->bd_vec[i].kiov_page);
 		else
 			pz = page_zone(aa->aa_ppga[i]->pg);
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index 740019a..ba4c079 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -66,7 +66,7 @@ static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
 	int i;
 
 	for (i = 0; i < desc->bd_iov_count ; i++)
-		put_page(BD_GET_KIOV(desc, i).kiov_page);
+		put_page(desc->bd_vec[i].kiov_page);
 }
 
 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
@@ -173,9 +173,9 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
 	if (!desc)
 		return NULL;
 
-	OBD_ALLOC_LARGE(GET_KIOV(desc),
-			nfrags * sizeof(*GET_KIOV(desc)));
-	if (!GET_KIOV(desc))
+	OBD_ALLOC_LARGE(desc->bd_vec,
+			nfrags * sizeof(*desc->bd_vec));
+	if (!desc->bd_vec)
 		goto out;
 
 	spin_lock_init(&desc->bd_lock);
@@ -251,7 +251,7 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 	LASSERT(len > 0);
 	LASSERT(pageoffset + len <= PAGE_SIZE);
 
-	kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
+	kiov = &desc->bd_vec[desc->bd_iov_count];
 
 	desc->bd_nob += len;
 
@@ -286,8 +286,8 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 	if (desc->bd_frag_ops->release_frags != NULL)
 		desc->bd_frag_ops->release_frags(desc);
 
-	OBD_FREE_LARGE(GET_KIOV(desc),
-		       desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
+	OBD_FREE_LARGE(desc->bd_vec,
+		       desc->bd_max_iov * sizeof(*desc->bd_vec));
 	OBD_FREE_PTR(desc);
 	EXIT;
 }
diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c
index b418ea7..59fbd21 100644
--- a/lustre/ptlrpc/gss/gss_bulk.c
+++ b/lustre/ptlrpc/gss/gss_bulk.c
@@ -126,7 +126,7 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
 		maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
 				   desc->bd_iov_count,
-				   GET_KIOV(desc),
+				   desc->bd_vec,
 				   &token);
 		if (maj != GSS_S_COMPLETE) {
 			CWARN("failed to sign bulk data: %x\n", maj);
@@ -251,12 +251,12 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
 		/* fix the actual data size */
 		for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-			if (BD_GET_KIOV(desc, i).kiov_len + nob >
+			if (desc->bd_vec[i].kiov_len + nob >
 			    desc->bd_nob_transferred) {
-				BD_GET_KIOV(desc, i).kiov_len =
+				desc->bd_vec[i].kiov_len =
 					desc->bd_nob_transferred - nob;
 			}
-			nob += BD_GET_KIOV(desc, i).kiov_len;
+			nob += desc->bd_vec[i].kiov_len;
 		}
 
 		token.data = bsdv->bsd_data;
@@ -265,7 +265,7 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
 		maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
 				      desc->bd_iov_count,
-				      GET_KIOV(desc),
+				      desc->bd_vec,
 				      &token);
 		if (maj != GSS_S_COMPLETE) {
 			CERROR("failed to verify bulk read: %x\n", maj);
@@ -400,7 +400,7 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
 
 	maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
 			      desc->bd_iov_count,
-			      GET_KIOV(desc), &token);
+			      desc->bd_vec, &token);
 	if (maj != GSS_S_COMPLETE) {
 		bsdv->bsd_flags |= BSD_FL_ERR;
 		CERROR("failed to verify bulk signature: %x\n", maj);
@@ -477,7 +477,7 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
 
 	maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
 			   desc->bd_iov_count,
-			   GET_KIOV(desc), &token);
+			   desc->bd_vec, &token);
 	if (maj != GSS_S_COMPLETE) {
 		bsdv->bsd_flags |= BSD_FL_ERR;
 		CERROR("failed to sign bulk data: %x\n", maj);
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 5ff7b3b..b85afa6 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -682,7 +682,7 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
 	int	blocksize, i, rc, nob = 0;
 
 	LASSERT(desc->bd_iov_count);
-	LASSERT(GET_ENC_KIOV(desc));
+	LASSERT(desc->bd_enc_vec);
 
 	blocksize = crypto_blkcipher_blocksize(tfm);
 	LASSERT(blocksize > 1);
@@ -717,19 +717,19 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
 	/* encrypt clear pages */
 	for (i = 0; i < desc->bd_iov_count; i++) {
 		sg_init_table(&src, 1);
-		sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
-			    (BD_GET_KIOV(desc, i).kiov_len +
+		sg_set_page(&src, desc->bd_vec[i].kiov_page,
+			    (desc->bd_vec[i].kiov_len +
 			     blocksize - 1) & (~(blocksize - 1)),
-			    BD_GET_KIOV(desc, i).kiov_offset);
+			    desc->bd_vec[i].kiov_offset);
 		if (adj_nob)
 			nob += src.length;
 
 		sg_init_table(&dst, 1);
-		sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+		sg_set_page(&dst, desc->bd_enc_vec[i].kiov_page,
 			    src.length, src.offset);
 
-		BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
-		BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
+		desc->bd_enc_vec[i].kiov_offset = dst.offset;
+		desc->bd_enc_vec[i].kiov_len = dst.length;
 
 		rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
 						 src.length);
@@ -802,7 +802,7 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 	int	blocksize, i, rc;
 
 	LASSERT(desc->bd_iov_count);
-	LASSERT(GET_ENC_KIOV(desc));
+	LASSERT(desc->bd_enc_vec);
 	LASSERT(desc->bd_nob_transferred);
 
 	blocksize = crypto_blkcipher_blocksize(tfm);
@@ -842,49 +842,49 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 
 	for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
 	     i++) {
-		if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
+		if (desc->bd_enc_vec[i].kiov_offset % blocksize
 		    != 0 ||
-		    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
+		    desc->bd_enc_vec[i].kiov_len % blocksize
 		    != 0) {
 			CERROR("page %d: odd offset %u len %u, blocksize %d\n",
-			       i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
-			       BD_GET_ENC_KIOV(desc, i).kiov_len,
+			       i, desc->bd_enc_vec[i].kiov_offset,
+			       desc->bd_enc_vec[i].kiov_len,
 			       blocksize);
 			return -EFAULT;
 		}
 
 		if (adj_nob) {
-			if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+			if (ct_nob + desc->bd_enc_vec[i].kiov_len >
 			    desc->bd_nob_transferred)
-				BD_GET_ENC_KIOV(desc, i).kiov_len =
+				desc->bd_enc_vec[i].kiov_len =
 					desc->bd_nob_transferred - ct_nob;
 
-			BD_GET_KIOV(desc, i).kiov_len =
-				BD_GET_ENC_KIOV(desc, i).kiov_len;
-			if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+			desc->bd_vec[i].kiov_len =
+				desc->bd_enc_vec[i].kiov_len;
+			if (pt_nob + desc->bd_enc_vec[i].kiov_len >
 			    desc->bd_nob)
-				BD_GET_KIOV(desc, i).kiov_len =
+				desc->bd_vec[i].kiov_len =
 					desc->bd_nob - pt_nob;
 		} else {
 			/* this should be guaranteed by LNET */
-			LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
+			LASSERT(ct_nob + desc->bd_enc_vec[i].
 				kiov_len <= desc->bd_nob_transferred);
-			LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
-				BD_GET_ENC_KIOV(desc, i).kiov_len);
+			LASSERT(desc->bd_vec[i].kiov_len <=
+				desc->bd_enc_vec[i].kiov_len);
 		}
 
-		if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+		if (desc->bd_enc_vec[i].kiov_len == 0)
 			continue;
 
 		sg_init_table(&src, 1);
-		sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
-			    BD_GET_ENC_KIOV(desc, i).kiov_len,
-			    BD_GET_ENC_KIOV(desc, i).kiov_offset);
+		sg_set_page(&src, desc->bd_enc_vec[i].kiov_page,
+			    desc->bd_enc_vec[i].kiov_len,
+			    desc->bd_enc_vec[i].kiov_offset);
 		dst = src;
-		if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+		if (desc->bd_vec[i].kiov_len % blocksize == 0)
 			sg_assign_page(&dst,
-				       BD_GET_KIOV(desc, i).kiov_page);
+				       desc->bd_vec[i].kiov_page);
 
 		rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
 						 src.length);
@@ -893,17 +893,17 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 			return rc;
 		}
 
-		if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
-			memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
-			       BD_GET_KIOV(desc, i).kiov_offset,
-			       page_address(BD_GET_ENC_KIOV(desc, i).
+		if (desc->bd_vec[i].kiov_len % blocksize != 0) {
+			memcpy(page_address(desc->bd_vec[i].kiov_page) +
+			       desc->bd_vec[i].kiov_offset,
+			       page_address(desc->bd_enc_vec[i].
 					    kiov_page) +
-			       BD_GET_KIOV(desc, i).kiov_offset,
-			       BD_GET_KIOV(desc, i).kiov_len);
+			       desc->bd_vec[i].kiov_offset,
+			       desc->bd_vec[i].kiov_len);
 		}
 
-		ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
-		pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+		ct_nob += desc->bd_enc_vec[i].kiov_len;
+		pt_nob += desc->bd_vec[i].kiov_len;
 	}
 
 	if (unlikely(ct_nob != desc->bd_nob_transferred)) {
@@ -921,7 +921,7 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 	/* if needed, clear up the rest unused iovs */
 	if (adj_nob)
 		while (i < desc->bd_iov_count)
-			BD_GET_KIOV(desc, i++).kiov_len = 0;
+			desc->bd_vec[i++].kiov_len = 0;
 
 	/* decrypt tail (krb5 header) */
 	rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
@@ -1107,27 +1107,27 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
 	int	blocksize, i;
 
 	LASSERT(desc->bd_iov_count);
-	LASSERT(GET_ENC_KIOV(desc));
+	LASSERT(desc->bd_enc_vec);
 	LASSERT(kctx->kc_keye.kb_tfm);
 
 	blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+		LASSERT(desc->bd_enc_vec[i].kiov_page);
 		/*
 		 * offset should always start at page boundary of either
 		 * client or server side.
 		 */
-		if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+		if (desc->bd_vec[i].kiov_offset & blocksize) {
 			CERROR("odd offset %d in page %d\n",
-			       BD_GET_KIOV(desc, i).kiov_offset, i);
+			       desc->bd_vec[i].kiov_offset, i);
 			return GSS_S_FAILURE;
 		}
 
-		BD_GET_ENC_KIOV(desc, i).kiov_offset =
-			BD_GET_KIOV(desc, i).kiov_offset;
-		BD_GET_ENC_KIOV(desc, i).kiov_len =
-			(BD_GET_KIOV(desc, i).kiov_len +
+		desc->bd_enc_vec[i].kiov_offset =
+			desc->bd_vec[i].kiov_offset;
+		desc->bd_enc_vec[i].kiov_len =
+			(desc->bd_vec[i].kiov_len +
 			 blocksize - 1) & (~(blocksize - 1));
 	}
 
@@ -1198,7 +1198,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
 	/* compute checksum */
 	if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
 			       khdr, 1, data_desc,
-			       desc->bd_iov_count, GET_KIOV(desc),
+			       desc->bd_iov_count, desc->bd_vec,
 			       &cksum, gctx->hash_func))
 		GOTO(out_free_cksum, major = GSS_S_FAILURE);
 	LASSERT(cksum.len >= ke->ke_hash_size);
@@ -1490,7 +1490,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
 	if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
 			       khdr, 1, data_desc,
 			       desc->bd_iov_count,
-			       GET_KIOV(desc),
+			       desc->bd_vec,
 			       &cksum, gctx->hash_func))
 		return GSS_S_FAILURE;
 	LASSERT(cksum.len >= ke->ke_hash_size);
diff --git a/lustre/ptlrpc/gss/gss_sk_mech.c b/lustre/ptlrpc/gss/gss_sk_mech.c
index 862ab89..8cdd1f7 100644
--- a/lustre/ptlrpc/gss/gss_sk_mech.c
+++ b/lustre/ptlrpc/gss/gss_sk_mech.c
@@ -612,16 +612,16 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
 	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+		if (desc->bd_vec[i].kiov_offset & blocksize) {
 			CERROR("offset %d not blocksize aligned\n",
-			       BD_GET_KIOV(desc, i).kiov_offset);
+			       desc->bd_vec[i].kiov_offset);
 			return GSS_S_FAILURE;
 		}
 
-		BD_GET_ENC_KIOV(desc, i).kiov_offset =
-			BD_GET_KIOV(desc, i).kiov_offset;
-		BD_GET_ENC_KIOV(desc, i).kiov_len =
-			sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
+		desc->bd_enc_vec[i].kiov_offset =
+			desc->bd_vec[i].kiov_offset;
+		desc->bd_enc_vec[i].kiov_len =
+			sk_block_mask(desc->bd_vec[i].kiov_len, blocksize);
 	}
 
 	return GSS_S_COMPLETE;
@@ -649,17 +649,17 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
 	sg_init_table(&ctxt, 1);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
-			    sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
+		sg_set_page(&ptxt, desc->bd_vec[i].kiov_page,
+			    sk_block_mask(desc->bd_vec[i].kiov_len,
 					  blocksize),
-			    BD_GET_KIOV(desc, i).kiov_offset);
+			    desc->bd_vec[i].kiov_offset);
 		nob += ptxt.length;
 
-		sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
+		sg_set_page(&ctxt, desc->bd_enc_vec[i].kiov_page,
 			    ptxt.length, ptxt.offset);
 
-		BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
-		BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;
+		desc->bd_enc_vec[i].kiov_offset = ctxt.offset;
+		desc->bd_enc_vec[i].kiov_len = ctxt.length;
 
 		rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
 						 ptxt.length);
@@ -704,8 +704,8 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
 
 	for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
 	     i++) {
-		lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
-		lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);
+		lnet_kiov_t *piov = &desc->bd_vec[i];
+		lnet_kiov_t *ciov = &desc->bd_enc_vec[i];
 
 		if (ciov->kiov_offset % blocksize != 0 ||
 		    ciov->kiov_len % blocksize != 0) {
@@ -773,7 +773,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
 	/* if needed, clear up the rest unused iovs */
 	if (adj_nob)
 		while (i < desc->bd_iov_count)
-			BD_GET_KIOV(desc, i++).kiov_len = 0;
+			desc->bd_vec[i++].kiov_len = 0;
 
 	if (unlikely(cnob != desc->bd_nob_transferred)) {
 		CERROR("%d cipher text transferred but only %d decrypted\n",
@@ -821,7 +821,7 @@ __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
 	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
 	skw.skw_hmac.len = sht_bytes;
 	if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
-			 desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac,
+			 desc->bd_iov_count, desc->bd_enc_vec, &skw.skw_hmac,
 			 gss_context->hash_func))
 		return GSS_S_FAILURE;
 
@@ -859,7 +859,7 @@ __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
 
 	rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
 				 &skw.skw_cipher, desc->bd_iov_count,
-				 GET_ENC_KIOV(desc), desc->bd_nob,
+				 desc->bd_enc_vec, desc->bd_nob,
 				 &skw.skw_hmac);
 	if (rc)
 		return rc;
diff --git a/lustre/ptlrpc/pers.c b/lustre/ptlrpc/pers.c
index 18fb720..4566d88 100644
--- a/lustre/ptlrpc/pers.c
+++ b/lustre/ptlrpc/pers.c
@@ -55,11 +55,11 @@ void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
 
 	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
 
 	md->options |= LNET_MD_KIOV;
-	if (GET_ENC_KIOV(desc))
-		md->start = &BD_GET_ENC_KIOV(desc, mdidx *
-					     LNET_MAX_IOV);
+	if (desc->bd_enc_vec)
+		md->start = &desc->bd_enc_vec[mdidx *
+					      LNET_MAX_IOV];
 	else
-		md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
+		md->start = &desc->bd_vec[mdidx * LNET_MAX_IOV];
 }
 
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index 6c5d64f..33b9a09 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -542,12 +542,12 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
 	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
 
 	/* resent bulk, enc iov might have been allocated previously */
-	if (GET_ENC_KIOV(desc) != NULL)
+	if (desc->bd_enc_vec != NULL)
 		return 0;
 
-	OBD_ALLOC_LARGE(GET_ENC_KIOV(desc),
-			desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
-	if (GET_ENC_KIOV(desc) == NULL)
+	OBD_ALLOC_LARGE(desc->bd_enc_vec,
+			desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
+	if (desc->bd_enc_vec == NULL)
 		return -ENOMEM;
 
 	spin_lock(&page_pools.epp_lock);
@@ -601,10 +601,10 @@ again:
 			 */
 			page_pools.epp_st_outofmem++;
 			spin_unlock(&page_pools.epp_lock);
-			OBD_FREE_LARGE(GET_ENC_KIOV(desc),
+			OBD_FREE_LARGE(desc->bd_enc_vec,
 				       desc->bd_iov_count *
-				       sizeof(*GET_ENC_KIOV(desc)));
-			GET_ENC_KIOV(desc) = NULL;
+				       sizeof(*desc->bd_enc_vec));
+			desc->bd_enc_vec = NULL;
 			return -ENOMEM;
 		}
 	}
@@ -632,7 +632,7 @@ again:
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
 		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
-		BD_GET_ENC_KIOV(desc, i).kiov_page =
+		desc->bd_enc_vec[i].kiov_page =
 			page_pools.epp_pools[p_idx][g_idx];
 		page_pools.epp_pools[p_idx][g_idx] = NULL;
 
@@ -668,7 +668,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 	int	p_idx, g_idx;
 	int	i;
 
-	if (GET_ENC_KIOV(desc) == NULL)
+	if (desc->bd_enc_vec == NULL)
 		return;
 
 	LASSERT(desc->bd_iov_count > 0);
@@ -683,12 +683,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 	LASSERT(page_pools.epp_pools[p_idx]);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page != NULL);
+		LASSERT(desc->bd_enc_vec[i].kiov_page != NULL);
 		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
 		LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
 
 		page_pools.epp_pools[p_idx][g_idx] =
-			BD_GET_ENC_KIOV(desc, i).kiov_page;
+			desc->bd_enc_vec[i].kiov_page;
 
 		if (++g_idx == PAGES_PER_POOL) {
 			p_idx++;
@@ -702,9 +702,9 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 
 	spin_unlock(&page_pools.epp_lock);
 
-	OBD_FREE_LARGE(GET_ENC_KIOV(desc),
-		       desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
-	GET_ENC_KIOV(desc) = NULL;
+	OBD_FREE_LARGE(desc->bd_enc_vec,
+		       desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
+	desc->bd_enc_vec = NULL;
 }
 
 /*
@@ -924,10 +924,10 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
 		cfs_crypto_hash_update_page(req,
-				BD_GET_KIOV(desc, i).kiov_page,
-				BD_GET_KIOV(desc, i).kiov_offset &
+				desc->bd_vec[i].kiov_page,
+				desc->bd_vec[i].kiov_offset &
 				~PAGE_MASK,
-				BD_GET_KIOV(desc, i).kiov_len);
+				desc->bd_vec[i].kiov_len);
 	}
 
 	if (hashsize > buflen) {
diff --git a/lustre/ptlrpc/sec_plain.c b/lustre/ptlrpc/sec_plain.c
index 82310b6..881add9 100644
--- a/lustre/ptlrpc/sec_plain.c
+++ b/lustre/ptlrpc/sec_plain.c
@@ -156,13 +156,13 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
 	unsigned int	off, i;
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		if (BD_GET_KIOV(desc, i).kiov_len == 0)
+		if (desc->bd_vec[i].kiov_len == 0)
 			continue;
 
-		ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
-		off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
+		ptr = kmap(desc->bd_vec[i].kiov_page);
+		off = desc->bd_vec[i].kiov_offset & ~PAGE_MASK;
 		ptr[off] ^= 0x1;
-		kunmap(BD_GET_KIOV(desc, i).kiov_page);
+		kunmap(desc->bd_vec[i].kiov_page);
 		return;
 	}
 }
@@ -355,12 +355,12 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
 	/* fix the actual data size */
 	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-		if (BD_GET_KIOV(desc, i).kiov_len +
+		if (desc->bd_vec[i].kiov_len +
 		    nob > desc->bd_nob_transferred) {
-			BD_GET_KIOV(desc, i).kiov_len =
+			desc->bd_vec[i].kiov_len =
 				desc->bd_nob_transferred - nob;
 		}
-		nob += BD_GET_KIOV(desc, i).kiov_len;
+		nob += desc->bd_vec[i].kiov_len;
 	}
 
 	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
-- 
1.8.3.1
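
What follows is an illustrative, self-contained sketch, not part of the patch and
not actual Lustre code: it models the access-pattern change the commit message
describes, using simplified stand-ins for lnet_kiov_t and struct ptlrpc_bulk_desc,
an old-style accessor macro in the spirit of BD_GET_KIOV(), and the direct
desc->bd_vec[i] indexing that replaces it. Every name and value below is an
assumption chosen for demonstration only.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for lnet_kiov_t. */
struct kiov {
        unsigned int kiov_len;
};

/* Simplified stand-in for struct ptlrpc_bulk_desc after the cleanup:
 * the kiov array is a plain member, not wrapped in a containing struct. */
struct bulk_desc {
        int bd_iov_count;
        struct kiov *bd_vec;
};

/* Old style: the field access was hidden behind an accessor macro. */
#define BD_GET_KIOV(desc, i) ((desc)->bd_vec[(i)])

int main(void)
{
        struct bulk_desc d;
        unsigned int nob_old = 0, nob_new = 0;
        int i;

        d.bd_iov_count = 4;
        d.bd_vec = calloc(d.bd_iov_count, sizeof(*d.bd_vec));
        if (d.bd_vec == NULL)
                return 1;

        for (i = 0; i < d.bd_iov_count; i++)
                d.bd_vec[i].kiov_len = 4096;

        /* Old macro-based access. */
        for (i = 0; i < d.bd_iov_count; i++)
                nob_old += BD_GET_KIOV(&d, i).kiov_len;

        /* New direct access, as used throughout the patch. */
        for (i = 0; i < d.bd_iov_count; i++)
                nob_new += d.bd_vec[i].kiov_len;

        printf("old: %u bytes, new: %u bytes\n", nob_old, nob_new);
        free(d.bd_vec);
        return 0;
}

Compiled and run on its own (for example, with a hypothetical file name:
cc -o bd_vec_sketch bd_vec_sketch.c && ./bd_vec_sketch), it prints the same byte
total from both access styles, mirroring the point of the cleanup: the macros only
hid a plain array access.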