LU-13004 ptlrpc: simplify bd_vec access.
[fs/lustre-release.git] lustre/ptlrpc/gss/gss_sk_mech.c
index f6b62f6..8cdd1f7 100644
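
The change is mechanical: the BD_GET_KIOV()/BD_GET_ENC_KIOV()/GET_ENC_KIOV() accessor macros are replaced by direct indexing of the bulk descriptor's kiov arrays, desc->bd_vec[] and desc->bd_enc_vec[]. A minimal before/after sketch of the access pattern, using simplified stand-in types and assumed macro definitions (the real lnet_kiov_t and ptlrpc_bulk_desc carry more members than shown here):

    /* Simplified stand-ins for illustration only; the real Lustre
     * structures have more members and different definitions. */
    struct kiov_stub {
            void            *kiov_page;
            unsigned int     kiov_len;
            unsigned int     kiov_offset;
    };

    struct bulk_desc_stub {
            int                bd_iov_count;
            struct kiov_stub  *bd_vec;      /* plaintext pages */
            struct kiov_stub  *bd_enc_vec;  /* ciphertext pages */
    };

    /* Before: macro indirection (definitions assumed for this sketch). */
    #define BD_GET_KIOV(desc, i)     ((desc)->bd_vec[(i)])
    #define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_enc_vec[(i)])

    /* After: plain array access, as used throughout this patch. */
    static void mirror_offsets(struct bulk_desc_stub *desc)
    {
            int i;

            for (i = 0; i < desc->bd_iov_count; i++)
                    desc->bd_enc_vec[i].kiov_offset =
                            desc->bd_vec[i].kiov_offset;
    }
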
@@ -409,7 +409,7 @@ u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
        rawobj_t checksum = RAWOBJ_EMPTY;
        struct ahash_request *req;
        struct scatterlist sg[1];
-       int rc = GSS_S_FAILURE;
+       int rc = 0;
        struct sg_table sgt;
        int bytes;
        int i;
@@ -423,11 +423,13 @@ u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
 
        OBD_ALLOC_LARGE(checksum.data, checksum.len);
        if (!checksum.data)
-               return rc;
+               return GSS_S_FAILURE;
 
        req = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
-       if (IS_ERR(req))
+       if (IS_ERR(req)) {
+               rc = GSS_S_FAILURE;
                goto cleanup;
+       }
 
        for (i = 0; i < msgcnt; i++) {
                if (!msgs[i].len)
@@ -463,15 +465,15 @@ u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
                        goto hash_cleanup;
        }
 
-       if (memcmp(token->data, checksum.data, checksum.len)) {
-               rc = GSS_S_BAD_SIG;
-               goto hash_cleanup;
-       }
-
-       rc = GSS_S_COMPLETE;
-
 hash_cleanup:
        cfs_crypto_hash_final(req, checksum.data, &checksum.len);
+       if (rc)
+               goto cleanup;
+
+       if (memcmp(token->data, checksum.data, checksum.len))
+               rc = GSS_S_BAD_SIG;
+       else
+               rc = GSS_S_COMPLETE;
 
 cleanup:
        OBD_FREE_LARGE(checksum.data, checksum.len);
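
Besides the accessor cleanup, the sk_verify_bulk_hmac() hunks above reorder the verification so that cfs_crypto_hash_final() runs before the memcmp() against the received token, and GSS_S_FAILURE is assigned only on paths that actually fail. A toy userspace sketch of that finalize-then-compare flow; the helpers and status codes below are invented for illustration and are not the Lustre/libcfs API:

    /* Toy illustration: finish the digest first, then compare it with the
     * received token.  toy_digest() and the TOY_S_* codes are invented for
     * this sketch. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { TOY_S_COMPLETE = 0, TOY_S_BAD_SIG = 1 };

    static void toy_digest(const uint8_t *buf, size_t len, uint8_t out[4])
    {
            uint32_t sum = 0;

            while (len--)
                    sum += *buf++;
            memcpy(out, &sum, sizeof(sum));
    }

    static int toy_verify(const uint8_t *msg, size_t len, const uint8_t token[4])
    {
            uint8_t checksum[4];

            toy_digest(msg, len, checksum);   /* finalize the checksum first ... */
            if (memcmp(token, checksum, sizeof(checksum)))  /* ... then compare */
                    return TOY_S_BAD_SIG;
            return TOY_S_COMPLETE;
    }

    int main(void)
    {
            const uint8_t msg[] = "bulk data";
            uint8_t token[4];

            toy_digest(msg, sizeof(msg), token);
            printf("verify: %d\n", toy_verify(msg, sizeof(msg), token));
            return 0;
    }
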
@@ -610,16 +612,16 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+               if (desc->bd_vec[i].kiov_offset & blocksize) {
                        CERROR("offset %d not blocksize aligned\n",
-                              BD_GET_KIOV(desc, i).kiov_offset);
+                              desc->bd_vec[i].kiov_offset);
                        return GSS_S_FAILURE;
                }
 
-               BD_GET_ENC_KIOV(desc, i).kiov_offset =
-                       BD_GET_KIOV(desc, i).kiov_offset;
-               BD_GET_ENC_KIOV(desc, i).kiov_len =
-                       sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
+               desc->bd_enc_vec[i].kiov_offset =
+                       desc->bd_vec[i].kiov_offset;
+               desc->bd_enc_vec[i].kiov_len =
+                       sk_block_mask(desc->bd_vec[i].kiov_len, blocksize);
        }
 
        return GSS_S_COMPLETE;
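
In gss_prep_bulk_sk() each plaintext kiov is mirrored into the encryption vector: the offset is carried over unchanged while the length is rounded up to a multiple of the cipher blocksize via sk_block_mask(). A short sketch of that round-up, assuming sk_block_mask() is the usual power-of-two round-up (the helper name below is illustrative, not the Lustre definition):

    /* Round len up to the next multiple of blocksize; blocksize must be a
     * power of two.  Illustrative stand-in for what sk_block_mask() is
     * assumed to do, e.g. round_up_block(10, 16) == 16. */
    static unsigned int round_up_block(unsigned int len, unsigned int blocksize)
    {
            return (len + blocksize - 1) & ~(blocksize - 1);
    }
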
@@ -647,17 +649,17 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
        sg_init_table(&ctxt, 1);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
-                           sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
+               sg_set_page(&ptxt, desc->bd_vec[i].kiov_page,
+                           sk_block_mask(desc->bd_vec[i].kiov_len,
                                          blocksize),
-                           BD_GET_KIOV(desc, i).kiov_offset);
+                           desc->bd_vec[i].kiov_offset);
                nob += ptxt.length;
 
-               sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
+               sg_set_page(&ctxt, desc->bd_enc_vec[i].kiov_page,
                            ptxt.length, ptxt.offset);
 
-               BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
-               BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;
+               desc->bd_enc_vec[i].kiov_offset = ctxt.offset;
+               desc->bd_enc_vec[i].kiov_len = ctxt.length;
 
                rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
                                                 ptxt.length);
@@ -702,8 +704,8 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
 
        for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
             i++) {
-               lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
-               lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);
+               lnet_kiov_t *piov = &desc->bd_vec[i];
+               lnet_kiov_t *ciov = &desc->bd_enc_vec[i];
 
                if (ciov->kiov_offset % blocksize != 0 ||
                    ciov->kiov_len % blocksize != 0) {
@@ -771,7 +773,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
-                       BD_GET_KIOV(desc, i++).kiov_len = 0;
+                       desc->bd_vec[i++].kiov_len = 0;
 
        if (unlikely(cnob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
@@ -819,7 +821,7 @@ __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;
        if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
-                        desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac,
+                        desc->bd_iov_count, desc->bd_enc_vec, &skw.skw_hmac,
                         gss_context->hash_func))
                return GSS_S_FAILURE;
 
@@ -857,7 +859,7 @@ __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
 
        rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
                                 &skw.skw_cipher, desc->bd_iov_count,
-                                GET_ENC_KIOV(desc), desc->bd_nob,
+                                desc->bd_enc_vec, desc->bd_nob,
                                 &skw.skw_hmac);
        if (rc)
                return rc;