LU-5835 ptlrpc: Introduce iovec to bulk descriptor
[fs/lustre-release.git] / lustre/ptlrpc/gss/gss_krb5_mech.c
index b43cb04..0a43598 100644
@@ -949,8 +949,9 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         struct scatterlist      src, dst;
         int                     blocksize, i, rc, nob = 0;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
+       LASSERT(GET_ENC_KIOV(desc));
 
        blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
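The bulk descriptor's kiov arrays are no longer reached as direct fields: this patch moves them into a union (the updated comment further down refers to bd_u.bd_kiov.bd_enc_vec), and ptlrpc_is_bulk_desc_kiov() asserts that the descriptor carries page kiovs rather than the new iovec flavour. A minimal sketch of what the accessor macros expand to, inferred from their use in this file; the authoritative definitions live in the ptlrpc headers:

/* Sketch only: kiov accessors as implied by their use in this file.
 * Field names beyond bd_u.bd_kiov.bd_enc_vec are inferred, not
 * quoted from the headers. */
#define GET_KIOV(desc)           ((desc)->bd_u.bd_kiov.bd_vec)
#define BD_GET_KIOV(desc, i)     ((desc)->bd_u.bd_kiov.bd_vec[i])
#define GET_ENC_KIOV(desc)       ((desc)->bd_u.bd_kiov.bd_enc_vec)
#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i])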
@@ -973,18 +974,19 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
-               sg_set_page(&src, desc->bd_iov[i].kiov_page,
-                           (desc->bd_iov[i].kiov_len + blocksize - 1) &
+               sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
+                           (BD_GET_KIOV(desc, i).kiov_len +
+                               blocksize - 1) &
                            (~(blocksize - 1)),
-                           desc->bd_iov[i].kiov_offset);
+                           BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
-               sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
-                           src.offset);
+               sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                           src.length, src.offset);
 
-                desc->bd_enc_iov[i].kiov_offset = dst.offset;
-                desc->bd_enc_iov[i].kiov_len = dst.length;
+               BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
+               BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
 
                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
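The source scatterlist length set above is kiov_len rounded up to the cipher block size. The mask expression is the standard power-of-two round-up; a self-contained illustration, assuming (as the block ciphers here do) that blocksize is a power of two:

#include <assert.h>

/* Round len up to the next multiple of blocksize; blocksize must be
 * a power of two for the mask form to be correct. */
static inline unsigned int round_up_block(unsigned int len,
                                          unsigned int blocksize)
{
        return (len + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
        assert(round_up_block(1000, 16) == 1008); /* partial block grows */
        assert(round_up_block(1024, 16) == 1024); /* aligned stays put */
        return 0;
}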
@@ -1019,12 +1021,14 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
  * plain text size.
  * - for client read: we don't know data size for each page, so
  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
- *   be smaller, so we need to adjust it according to bd_enc_iov[]->kiov_len.
+ *   be smaller, so we need to adjust it according to
+ *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
 *   this means we DO NOT support the situation where the server sends
 *   odd size data in a page which is not the last one.
 * - for server write: we know exactly the data size expected for each page,
 *   thus kiov_len is already accurate, so we should not adjust it at all.
- *   and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which
+ *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
+ *   round_up(bd_iov[]->kiov_len) which
  *   should have been done by prep_bulk().
  */
 static
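Concretely, the adjustment described in the comment above clips each encrypted kiov to the bytes actually transferred and mirrors the result into the plaintext kiov, capped at the request size. A condensed sketch of the fixup performed inside the decrypt loop (hypothetical helper, not part of the patch):

/* Hypothetical helper restating the adj_nob branch of
 * krb5_decrypt_bulk(). */
static void adjust_bulk_kiov_len(unsigned int *enc_len, unsigned int *pt_len,
                                 int ct_nob, int pt_nob,
                                 int nob_transferred, int bd_nob)
{
        if (ct_nob + *enc_len > nob_transferred)
                *enc_len = nob_transferred - ct_nob; /* short transfer */
        *pt_len = *enc_len;                          /* mirror ciphertext */
        if (pt_nob + *pt_len > bd_nob)
                *pt_len = bd_nob - pt_nob;           /* cap at request size */
}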
@@ -1041,8 +1045,9 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
         int                     ct_nob = 0, pt_nob = 0;
         int                     blocksize, i, rc;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
+       LASSERT(GET_ENC_KIOV(desc));
         LASSERT(desc->bd_nob_transferred);
 
        blocksize = crypto_blkcipher_blocksize(tfm);
@@ -1068,43 +1073,51 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                 return rc;
         }
 
-        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
-             i++) {
-                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
-                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
-                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
-                               i, desc->bd_enc_iov[i].kiov_offset,
-                               desc->bd_enc_iov[i].kiov_len, blocksize);
-                        return -EFAULT;
-                }
-
-                if (adj_nob) {
-                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
-                            desc->bd_nob_transferred)
-                                desc->bd_enc_iov[i].kiov_len =
-                                        desc->bd_nob_transferred - ct_nob;
+       for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+            i++) {
+               if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
+                   != 0 ||
+                   BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
+                   != 0) {
+                       CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+                              i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
+                              BD_GET_ENC_KIOV(desc, i).kiov_len,
+                              blocksize);
+                       return -EFAULT;
+               }
 
-                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
-                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
-                                desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
-                } else {
-                        /* this should be guaranteed by LNET */
-                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
-                                desc->bd_nob_transferred);
-                        LASSERT(desc->bd_iov[i].kiov_len <=
-                                desc->bd_enc_iov[i].kiov_len);
-                }
+               if (adj_nob) {
+                       if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                           desc->bd_nob_transferred)
+                               BD_GET_ENC_KIOV(desc, i).kiov_len =
+                                       desc->bd_nob_transferred - ct_nob;
+
+                       BD_GET_KIOV(desc, i).kiov_len =
+                         BD_GET_ENC_KIOV(desc, i).kiov_len;
+                       if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                           desc->bd_nob)
+                               BD_GET_KIOV(desc, i).kiov_len =
+                                 desc->bd_nob - pt_nob;
+               } else {
+                       /* this should be guaranteed by LNET */
+                       LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
+                               kiov_len <=
+                               desc->bd_nob_transferred);
+                       LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
+                               BD_GET_ENC_KIOV(desc, i).kiov_len);
+               }
 
-                if (desc->bd_enc_iov[i].kiov_len == 0)
-                        continue;
+               if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+                       continue;
 
                sg_init_table(&src, 1);
-               sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
-                           desc->bd_enc_iov[i].kiov_len,
-                           desc->bd_enc_iov[i].kiov_offset);
+               sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                           BD_GET_ENC_KIOV(desc, i).kiov_len,
+                           BD_GET_ENC_KIOV(desc, i).kiov_offset);
                dst = src;
-               if (desc->bd_iov[i].kiov_len % blocksize == 0)
-                       sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
+               if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+                       sg_assign_page(&dst,
+                                      BD_GET_KIOV(desc, i).kiov_page);
 
                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
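A note on the dst handling just above: when the page's plaintext length is a whole number of cipher blocks, the scatterlist page is swapped so the ciphertext decrypts straight into the destination page; otherwise it decrypts in place in the bounce page and the following hunk memcpy()s the partial block out. A condensed restatement (sketch, kernel context assumed):

/* Sketch: choose the decrypt destination as the loop above does. */
static int decrypt_into(struct blkcipher_desc *ciph_desc,
                        struct scatterlist *src,
                        struct page *plain_page,
                        unsigned int plain_len, int blocksize)
{
        struct scatterlist dst = *src;   /* default: decrypt in place */

        if (plain_len % blocksize == 0)  /* block-aligned: go direct */
                sg_assign_page(&dst, plain_page);
        return crypto_blkcipher_decrypt_iv(ciph_desc, &dst, src,
                                           src->length);
}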
@@ -1113,17 +1126,18 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                         return rc;
                 }
 
-                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
-                       memcpy(page_address(desc->bd_iov[i].kiov_page) +
-                               desc->bd_iov[i].kiov_offset,
-                              page_address(desc->bd_enc_iov[i].kiov_page) +
-                               desc->bd_iov[i].kiov_offset,
-                               desc->bd_iov[i].kiov_len);
-                }
+               if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
+                       memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
+                              BD_GET_KIOV(desc, i).kiov_offset,
+                              page_address(BD_GET_ENC_KIOV(desc, i).
+                                           kiov_page) +
+                              BD_GET_KIOV(desc, i).kiov_offset,
+                              BD_GET_KIOV(desc, i).kiov_len);
+               }
 
-                ct_nob += desc->bd_enc_iov[i].kiov_len;
-                pt_nob += desc->bd_iov[i].kiov_len;
-        }
+               ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
+               pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+       }
 
         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                 CERROR("%d cipher text transferred but only %d decrypted\n",
@@ -1137,10 +1151,10 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                 return -EFAULT;
         }
 
-        /* if needed, clear up the rest unused iovs */
-        if (adj_nob)
-                while (i < desc->bd_iov_count)
-                        desc->bd_iov[i++].kiov_len = 0;
+       /* if needed, clear the remaining unused iovs */
+       if (adj_nob)
+               while (i < desc->bd_iov_count)
+                       BD_GET_KIOV(desc, i++).kiov_len = 0;
 
         /* decrypt tail (krb5 header) */
         buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
@@ -1305,35 +1319,38 @@ arc4_out:
 
 static
 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
-                             struct ptlrpc_bulk_desc *desc)
+                            struct ptlrpc_bulk_desc *desc)
 {
-        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
-        int                  blocksize, i;
+       struct krb5_ctx     *kctx = gctx->internal_ctx_id;
+       int                  blocksize, i;
 
-        LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
-        LASSERT(kctx->kc_keye.kb_tfm);
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+       LASSERT(desc->bd_iov_count);
+       LASSERT(GET_ENC_KIOV(desc));
+       LASSERT(kctx->kc_keye.kb_tfm);
 
        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                LASSERT(desc->bd_enc_iov[i].kiov_page);
-                /*
-                 * offset should always start at page boundary of either
-                 * client or server side.
-                 */
-                if (desc->bd_iov[i].kiov_offset & blocksize) {
-                        CERROR("odd offset %d in page %d\n",
-                               desc->bd_iov[i].kiov_offset, i);
-                        return GSS_S_FAILURE;
-                }
+       for (i = 0; i < desc->bd_iov_count; i++) {
+               LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+               /*
+                * the offset should always start at a page boundary on
+                * either the client or the server side.
+                */
+               if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+                       CERROR("odd offset %d in page %d\n",
+                              BD_GET_KIOV(desc, i).kiov_offset, i);
+                       return GSS_S_FAILURE;
+               }
 
-                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
-                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
-                                                blocksize - 1) & (~(blocksize - 1));
-        }
+               BD_GET_ENC_KIOV(desc, i).kiov_offset =
+                       BD_GET_KIOV(desc, i).kiov_offset;
+               BD_GET_ENC_KIOV(desc, i).kiov_len =
+                       (BD_GET_KIOV(desc, i).kiov_len +
+                        blocksize - 1) & (~(blocksize - 1));
+       }
 
-        return GSS_S_COMPLETE;
+       return GSS_S_COMPLETE;
 }
 
 static
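When reading the offset check in the loop above, note that masking with blocksize itself tests only a single bit; the textbook power-of-two alignment test masks blocksize - 1. The patch carries the original expression over unchanged, so the sketch below is shown for contrast only:

/* Textbook alignment test for a power-of-two blocksize, for
 * comparison with the (kiov_offset & blocksize) check above. */
static inline int offset_is_block_aligned(unsigned int offset,
                                          unsigned int blocksize)
{
        return (offset & (blocksize - 1)) == 0;
}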
@@ -1350,6 +1367,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         __u8                 conf[GSS_MAX_CIPHER_BLOCK];
         int                  rc = 0;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(ke);
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
 
@@ -1396,13 +1414,13 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         data_desc[0].data = conf;
         data_desc[0].len = ke->ke_conf_size;
 
-        /* compute checksum */
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 1, data_desc,
-                               desc->bd_iov_count, desc->bd_iov,
-                               &cksum))
-                return GSS_S_FAILURE;
-        LASSERT(cksum.len >= ke->ke_hash_size);
+       /* compute checksum */
+       if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                              khdr, 1, data_desc,
+                              desc->bd_iov_count, GET_KIOV(desc),
+                              &cksum))
+               return GSS_S_FAILURE;
+       LASSERT(cksum.len >= ke->ke_hash_size);
 
         /*
          * clear text layout for encryption:
@@ -1629,6 +1647,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         int                  rc;
         __u32                major;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(ke);
 
         if (token->len < sizeof(*khdr)) {
@@ -1686,12 +1705,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         data_desc[0].data = plain.data;
         data_desc[0].len = blocksize;
 
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 1, data_desc,
-                               desc->bd_iov_count, desc->bd_iov,
-                               &cksum))
-                return GSS_S_FAILURE;
-        LASSERT(cksum.len >= ke->ke_hash_size);
+       if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                              khdr, 1, data_desc,
+                              desc->bd_iov_count,
+                              GET_KIOV(desc),
+                              &cksum))
+               return GSS_S_FAILURE;
+       LASSERT(cksum.len >= ke->ke_hash_size);
 
         if (memcmp(plain.data + blocksize + sizeof(*khdr),
                    cksum.data + cksum.len - ke->ke_hash_size,