LU-14475 log: Rewrite some log messages
[fs/lustre-release.git] / lustre/ptlrpc/gss/gss_krb5_mech.c
index b85afa6..a544e8b 100644
@@ -95,6 +95,7 @@ static struct krb5_enctype enctypes[] = {
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
+#ifdef HAVE_DES3_SUPPORT
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
@@ -103,6 +104,7 @@ static struct krb5_enctype enctypes[] = {
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
+#endif
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
@@ -445,7 +447,7 @@ __s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
-                        int iovcnt, lnet_kiov_t *iovs,
+                        int iovcnt, struct bio_vec *iovs,
                         rawobj_t *cksum,
                         digest_hash hash_func)
 {
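
Most of the remaining hunks in this file are the lnet_kiov_t to struct bio_vec rename. The two types carry the same three fields, so the substitution is mechanical. The sketch below is illustrative only and not part of the patch (example_bvec_to_sg() is a hypothetical helper); it shows the assumed field mapping and how such an entry is typically handed to the crypto layer through a scatterlist.

#include <linux/bvec.h>
#include <linux/scatterlist.h>

/*
 * Field mapping assumed by the rename in this patch:
 *   kiov_page   -> bv_page
 *   kiov_len    -> bv_len
 *   kiov_offset -> bv_offset
 */
static void example_bvec_to_sg(const struct bio_vec *bv,
			       struct scatterlist *sg)
{
	/* a bio_vec is (page, len, offset), exactly what sg_set_page() takes */
	sg_init_table(sg, 1);
	sg_set_page(sg, bv->bv_page, bv->bv_len, bv->bv_offset);
}
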
@@ -581,7 +583,7 @@ __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
-                          lnet_kiov_t *iovs,
+                          struct bio_vec *iovs,
                           rawobj_t *token)
 {
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
@@ -618,7 +620,7 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
-                             lnet_kiov_t *iovs,
+                             struct bio_vec *iovs,
                              rawobj_t *token)
 {
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
@@ -668,31 +670,27 @@ out:
  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
  */
 static
-int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
-                      struct krb5_header *khdr,
-                      char *confounder,
-                      struct ptlrpc_bulk_desc *desc,
-                      rawobj_t *cipher,
-                      int adj_nob)
+int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm,
+                     struct krb5_header *khdr,
+                     char *confounder,
+                     struct ptlrpc_bulk_desc *desc,
+                     rawobj_t *cipher,
+                     int adj_nob)
 {
-        struct blkcipher_desc   ciph_desc;
-        __u8                    local_iv[16] = {0};
-        struct scatterlist      src, dst;
-       struct sg_table         sg_src, sg_dst;
-        int                     blocksize, i, rc, nob = 0;
+       __u8 local_iv[16] = {0};
+       struct scatterlist src, dst;
+       struct sg_table sg_src, sg_dst;
+       int blocksize, i, rc, nob = 0;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-        LASSERT(desc->bd_iov_count);
+       LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_vec);
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
-        LASSERT(blocksize > 1);
-        LASSERT(cipher->len == blocksize + sizeof(*khdr));
-
-        ciph_desc.tfm  = tfm;
-        ciph_desc.info = local_iv;
-        ciph_desc.flags = 0;
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
+       LASSERT(blocksize > 1);
+       LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
-        /* encrypt confounder */
+       /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;
@@ -702,57 +700,69 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                gss_teardown_sgtable(&sg_src);
                return rc;
        }
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  blocksize, local_iv);
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
-                                        sg_src.sgl, blocksize);
+       rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
 
-        if (rc) {
-                CERROR("error to encrypt confounder: %d\n", rc);
-                return rc;
-        }
+       if (rc) {
+               CERROR("error to encrypt confounder: %d\n", rc);
+               skcipher_request_zero(req);
+               return rc;
+       }
 
-        /* encrypt clear pages */
-        for (i = 0; i < desc->bd_iov_count; i++) {
+       /* encrypt clear pages */
+       for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
-               sg_set_page(&src, desc->bd_vec[i].kiov_page,
-                           (desc->bd_vec[i].kiov_len +
+               sg_set_page(&src, desc->bd_vec[i].bv_page,
+                           (desc->bd_vec[i].bv_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
-                           desc->bd_vec[i].kiov_offset);
+                           desc->bd_vec[i].bv_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
-               sg_set_page(&dst, desc->bd_enc_vec[i].kiov_page,
+               sg_set_page(&dst, desc->bd_enc_vec[i].bv_page,
                            src.length, src.offset);
 
-               desc->bd_enc_vec[i].kiov_offset = dst.offset;
-               desc->bd_enc_vec[i].kiov_len = dst.length;
+               desc->bd_enc_vec[i].bv_offset = dst.offset;
+               desc->bd_enc_vec[i].bv_len = dst.length;
 
-               rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
-                                                    src.length);
-                if (rc) {
-                        CERROR("error to encrypt page: %d\n", rc);
-                        return rc;
-                }
-        }
+               skcipher_request_set_crypt(req, &src, &dst,
+                                         src.length, local_iv);
+               rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length);
+               if (rc) {
+                       CERROR("error to encrypt page: %d\n", rc);
+                       skcipher_request_zero(req);
+                       return rc;
+               }
+       }
 
-        /* encrypt krb5 header */
+       /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
-       if (rc != 0)
+       if (rc != 0) {
+               skcipher_request_zero(req);
                return rc;
+       }
 
        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
+               skcipher_request_zero(req);
                return rc;
        }
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
-                                        sizeof(*khdr));
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  sizeof(*khdr), local_iv);
+       rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+                                       sizeof(*khdr));
+       skcipher_request_zero(req);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
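
The hunks above follow the standard replacement for the removed crypto_blkcipher API: allocate a crypto_sync_skcipher, put a request on the stack with SYNC_SKCIPHER_REQUEST_ON_STACK(), bind the tfm, scatterlists and IV to it, and wipe the request when done. Below is a minimal, self-contained sketch of that pattern; it is illustrative only (example_cbc_encrypt() and the "cbc(aes)" choice are assumptions), and the crypto_skcipher_encrypt_iv()/..._decrypt_iv() calls used in the patch are Lustre-local helpers layered over the plain crypto_skcipher_encrypt()/..._decrypt() shown here, not upstream kernel API.

#include <linux/err.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_cbc_encrypt(const u8 *key, unsigned int keylen,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	u8 iv[16] = { 0 };
	int rc;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!rc) {
		/* the request lives on the stack; sync ciphers never
		 * complete asynchronously, so no callback is needed */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		rc = crypto_skcipher_encrypt(req);
		/* wipe cipher state left in the on-stack request */
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return rc;
}
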
@@ -772,53 +782,49 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
  * desc->bd_nob_transferred is the size of cipher text received.
  * desc->bd_nob is the target size of plain text supposed to be.
  *
- * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * if adj_nob != 0, we adjust each page's bv_len to the actual
  * plain text size.
  * - for client read: we don't know data size for each page, so
- *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
+ *   bd_iov[]->bv_len is set to PAGE_SIZE, but actual data received might
  *   be smaller, so we need to adjust it according to
- *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
+ *   bd_u.bd_kiov.bd_enc_vec[]->bv_len.
 *   this means we DO NOT support the situation where the server sends an odd
 *   size of data in a page which is not the last one.
 * - for server write: we know exactly the data size expected for each page,
- *   thus kiov_len is accurate already, so we should not adjust it at all.
- *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
- *   round_up(bd_iov[]->kiov_len) which
+ *   thus bv_len is accurate already, so we should not adjust it at all.
+ *   and bd_u.bd_kiov.bd_enc_vec[]->bv_len should be
+ *   round_up(bd_iov[]->bv_len) which
  *   should have been done by prep_bulk().
  */
 static
-int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
-                      struct krb5_header *khdr,
-                      struct ptlrpc_bulk_desc *desc,
-                      rawobj_t *cipher,
-                      rawobj_t *plain,
-                      int adj_nob)
+int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm,
+                     struct krb5_header *khdr,
+                     struct ptlrpc_bulk_desc *desc,
+                     rawobj_t *cipher,
+                     rawobj_t *plain,
+                     int adj_nob)
 {
-        struct blkcipher_desc   ciph_desc;
-        __u8                    local_iv[16] = {0};
-        struct scatterlist      src, dst;
-       struct sg_table         sg_src, sg_dst;
-        int                     ct_nob = 0, pt_nob = 0;
-        int                     blocksize, i, rc;
-
-        LASSERT(desc->bd_iov_count);
-       LASSERT(desc->bd_enc_vec);
-        LASSERT(desc->bd_nob_transferred);
+       __u8 local_iv[16] = {0};
+       struct scatterlist src, dst;
+       struct sg_table sg_src, sg_dst;
+       int ct_nob = 0, pt_nob = 0;
+       int blocksize, i, rc;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
-        LASSERT(blocksize > 1);
-        LASSERT(cipher->len == blocksize + sizeof(*khdr));
+       LASSERT(desc->bd_iov_count);
+       LASSERT(desc->bd_enc_vec);
+       LASSERT(desc->bd_nob_transferred);
 
-        ciph_desc.tfm  = tfm;
-        ciph_desc.info = local_iv;
-        ciph_desc.flags = 0;
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
+       LASSERT(blocksize > 1);
+       LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
-        if (desc->bd_nob_transferred % blocksize) {
-                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
-                return -EPROTO;
-        }
+       if (desc->bd_nob_transferred % blocksize) {
+               CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+               return -EPROTO;
+       }
 
-        /* decrypt head (confounder) */
+       /* decrypt head (confounder) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;
@@ -829,101 +835,109 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                return rc;
        }
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
-                                        sg_src.sgl, blocksize);
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  blocksize, local_iv);
+
+       rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
 
-        if (rc) {
-                CERROR("error to decrypt confounder: %d\n", rc);
-                return rc;
-        }
+       if (rc) {
+               CERROR("error to decrypt confounder: %d\n", rc);
+               skcipher_request_zero(req);
+               return rc;
+       }
 
        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
-               if (desc->bd_enc_vec[i].kiov_offset % blocksize
-                   != 0 ||
-                   desc->bd_enc_vec[i].kiov_len % blocksize
-                   != 0) {
+               if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 ||
+                   desc->bd_enc_vec[i].bv_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
-                              i, desc->bd_enc_vec[i].kiov_offset,
-                              desc->bd_enc_vec[i].kiov_len,
+                              i, desc->bd_enc_vec[i].bv_offset,
+                              desc->bd_enc_vec[i].bv_len,
                               blocksize);
+                       skcipher_request_zero(req);
                        return -EFAULT;
                }
 
                if (adj_nob) {
-                       if (ct_nob + desc->bd_enc_vec[i].kiov_len >
+                       if (ct_nob + desc->bd_enc_vec[i].bv_len >
                            desc->bd_nob_transferred)
-                               desc->bd_enc_vec[i].kiov_len =
+                               desc->bd_enc_vec[i].bv_len =
                                        desc->bd_nob_transferred - ct_nob;
 
-                       desc->bd_vec[i].kiov_len =
-                         desc->bd_enc_vec[i].kiov_len;
-                       if (pt_nob + desc->bd_enc_vec[i].kiov_len >
+                       desc->bd_vec[i].bv_len =
+                         desc->bd_enc_vec[i].bv_len;
+                       if (pt_nob + desc->bd_enc_vec[i].bv_len >
                            desc->bd_nob)
-                               desc->bd_vec[i].kiov_len =
+                               desc->bd_vec[i].bv_len =
                                  desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + desc->bd_enc_vec[i].
-                               kiov_len <=
+                               bv_len <=
                                desc->bd_nob_transferred);
-                       LASSERT(desc->bd_vec[i].kiov_len <=
-                               desc->bd_enc_vec[i].kiov_len);
+                       LASSERT(desc->bd_vec[i].bv_len <=
+                               desc->bd_enc_vec[i].bv_len);
                }
 
-               if (desc->bd_enc_vec[i].kiov_len == 0)
+               if (desc->bd_enc_vec[i].bv_len == 0)
                        continue;
 
                sg_init_table(&src, 1);
-               sg_set_page(&src, desc->bd_enc_vec[i].kiov_page,
-                           desc->bd_enc_vec[i].kiov_len,
-                           desc->bd_enc_vec[i].kiov_offset);
+               sg_set_page(&src, desc->bd_enc_vec[i].bv_page,
+                           desc->bd_enc_vec[i].bv_len,
+                           desc->bd_enc_vec[i].bv_offset);
                dst = src;
-               if (desc->bd_vec[i].kiov_len % blocksize == 0)
+               if (desc->bd_vec[i].bv_len % blocksize == 0)
                        sg_assign_page(&dst,
-                                      desc->bd_vec[i].kiov_page);
-
-               rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
-                                                src.length);
-                if (rc) {
-                        CERROR("error to decrypt page: %d\n", rc);
-                        return rc;
-                }
-
-               if (desc->bd_vec[i].kiov_len % blocksize != 0) {
-                       memcpy(page_address(desc->bd_vec[i].kiov_page) +
-                              desc->bd_vec[i].kiov_offset,
+                                      desc->bd_vec[i].bv_page);
+
+               skcipher_request_set_crypt(req, &src, &dst,
+                                          src.length, local_iv);
+               rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length);
+               if (rc) {
+                       CERROR("error to decrypt page: %d\n", rc);
+                       skcipher_request_zero(req);
+                       return rc;
+               }
+
+               if (desc->bd_vec[i].bv_len % blocksize != 0) {
+                       memcpy(page_address(desc->bd_vec[i].bv_page) +
+                              desc->bd_vec[i].bv_offset,
                               page_address(desc->bd_enc_vec[i].
-                                           kiov_page) +
-                              desc->bd_vec[i].kiov_offset,
-                              desc->bd_vec[i].kiov_len);
+                                           bv_page) +
+                              desc->bd_vec[i].bv_offset,
+                              desc->bd_vec[i].bv_len);
                }
 
-               ct_nob += desc->bd_enc_vec[i].kiov_len;
-               pt_nob += desc->bd_vec[i].kiov_len;
+               ct_nob += desc->bd_enc_vec[i].bv_len;
+               pt_nob += desc->bd_vec[i].bv_len;
        }
 
-        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
-                CERROR("%d cipher text transferred but only %d decrypted\n",
-                       desc->bd_nob_transferred, ct_nob);
-                return -EFAULT;
-        }
+       if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+               CERROR("%d cipher text transferred but only %d decrypted\n",
+                      desc->bd_nob_transferred, ct_nob);
+               skcipher_request_zero(req);
+               return -EFAULT;
+       }
 
-        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
-                CERROR("%d plain text expected but only %d received\n",
-                       desc->bd_nob, pt_nob);
-                return -EFAULT;
-        }
+       if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+               CERROR("%d plain text expected but only %d received\n",
+                      desc->bd_nob, pt_nob);
+               skcipher_request_zero(req);
+               return -EFAULT;
+       }
 
        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
-                       desc->bd_vec[i++].kiov_len = 0;
+                       desc->bd_vec[i++].bv_len = 0;
 
-        /* decrypt tail (krb5 header) */
+       /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0)
@@ -936,23 +950,25 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                return rc;
        }
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
-                                        sizeof(*khdr));
-
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  sizeof(*khdr), local_iv);
+       rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+                                       sizeof(*khdr));
        gss_teardown_sgtable(&sg_src);
        gss_teardown_sgtable(&sg_dst);
 
-        if (rc) {
-                CERROR("error to decrypt tail: %d\n", rc);
-                return rc;
-        }
+       skcipher_request_zero(req);
+       if (rc) {
+               CERROR("error to decrypt tail: %d\n", rc);
+               return rc;
+       }
 
-        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
-                CERROR("krb5 header doesn't match\n");
-                return -EACCES;
-        }
+       if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+               CERROR("krb5 header doesn't match\n");
+               return -EACCES;
+       }
 
-        return 0;
+       return 0;
 }
 
 static
@@ -977,7 +993,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
-               crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+               crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm));
 
        /*
         * final token format:
@@ -1001,7 +1017,8 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_sync_skcipher_blocksize(
+                                                       kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);
 
@@ -1049,7 +1066,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
-               struct crypto_blkcipher *arc4_tfm;
+               struct crypto_sync_skcipher *arc4_tfm;
 
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
@@ -1058,14 +1075,14 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
-                                           arc4_keye.len)) {
+               if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
@@ -1074,7 +1091,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
 arc4_out_tfm:
-               crypto_free_blkcipher(arc4_tfm);
+               crypto_free_sync_skcipher(arc4_tfm);
 arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
@@ -1110,24 +1127,24 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
        LASSERT(desc->bd_enc_vec);
        LASSERT(kctx->kc_keye.kb_tfm);
 
-       blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+       blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-               LASSERT(desc->bd_enc_vec[i].kiov_page);
+               LASSERT(desc->bd_enc_vec[i].bv_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.
                 */
-               if (desc->bd_vec[i].kiov_offset & blocksize) {
+               if (desc->bd_vec[i].bv_offset & blocksize) {
                        CERROR("odd offset %d in page %d\n",
-                              desc->bd_vec[i].kiov_offset, i);
+                              desc->bd_vec[i].bv_offset, i);
                        return GSS_S_FAILURE;
                }
 
-               desc->bd_enc_vec[i].kiov_offset =
-                       desc->bd_vec[i].kiov_offset;
-               desc->bd_enc_vec[i].kiov_len =
-                       (desc->bd_vec[i].kiov_len +
+               desc->bd_enc_vec[i].bv_offset =
+                       desc->bd_vec[i].bv_offset;
+               desc->bd_enc_vec[i].bv_len =
+                       (desc->bd_vec[i].bv_len +
                         blocksize - 1) & (~(blocksize - 1));
        }
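
The masking expression above rounds each fragment length up to a whole number of cipher blocks, which only works because the block size is a power of two. Worked example: with a 16-byte AES block, a 1000-byte fragment becomes (1000 + 15) & ~15 = 1008 bytes, i.e. 63 cipher blocks. The helper below is illustrative only, equivalent to the open-coded expression in the patch:

static inline unsigned int example_round_up_to_block(unsigned int len,
						     unsigned int blocksize)
{
	/* valid only for power-of-two block sizes */
	return (len + blocksize - 1) & ~(blocksize - 1);
}
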
 
@@ -1142,7 +1159,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
-       int                  blocksize;
+       int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
@@ -1171,10 +1188,10 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
 
        /*
@@ -1182,9 +1199,9 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
-       LASSERT(blocksize <= ke->ke_conf_size);
-       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
-       LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+       LASSERT(blocksz <= ke->ke_conf_size);
+       LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
+       LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16);
 
        /*
         * clear text layout for checksum:
@@ -1219,7 +1236,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
        data_desc[0].len = ke->ke_conf_size;
 
        cipher.data = (__u8 *)(khdr + 1);
-       cipher.len = blocksize + sizeof(*khdr);
+       cipher.len = blocksz + sizeof(*khdr);
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
@@ -1255,7 +1272,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
-       int                  blocksize, bodysize;
+       int                  blocksz, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
@@ -1281,10 +1298,10 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
 
        /* expected token layout:
@@ -1294,7 +1311,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
 
-       if (bodysize % blocksize) {
+       if (bodysize % blocksz) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }
@@ -1324,7 +1341,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
-               struct crypto_blkcipher *arc4_tfm;
+               struct crypto_sync_skcipher *arc4_tfm;
 
                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;
@@ -1336,14 +1353,14 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                        GOTO(arc4_out, rc = -EACCES);
                }
 
-               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               if (crypto_blkcipher_setkey(arc4_tfm,
-                                           arc4_keye.data, arc4_keye.len)) {
+               if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
@@ -1352,7 +1369,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
 arc4_out_tfm:
-               crypto_free_blkcipher(arc4_tfm);
+               crypto_free_sync_skcipher(arc4_tfm);
 arc4_out_key:
                rawobj_free(&arc4_keye);
 arc4_out:
@@ -1423,7 +1440,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
-       int                  blocksize;
+       int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
@@ -1448,13 +1465,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
-       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+       LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
 
        /*
         * token format is expected as:
@@ -1462,14 +1479,14 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
-       if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+       if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }
 
        cipher.data = (__u8 *) (khdr + 1);
-       cipher.len = blocksize + sizeof(*khdr);
+       cipher.len = blocksz + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;
 
@@ -1485,7 +1502,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
-       data_desc[0].len = blocksize;
+       data_desc[0].len = blocksz;
 
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
@@ -1495,7 +1512,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);
 
-       if (memcmp(plain.data + blocksize + sizeof(*khdr),
+       if (memcmp(plain.data + blocksz + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
@@ -1508,15 +1525,15 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
 }
 
 int gss_display_kerberos(struct gss_ctx        *ctx,
-                         char                  *buf,
-                         int                    bufsize)
+                        char                  *buf,
+                        int                    bufsize)
 {
-        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
-        int                 written;
+       struct krb5_ctx    *kctx = ctx->internal_ctx_id;
+       int                 written;
 
-        written = snprintf(buf, bufsize, "krb5 (%s)",
-                           enctype2str(kctx->kc_enctype));
-        return written;
+       written = scnprintf(buf, bufsize, "krb5 (%s)",
+                           enctype2str(kctx->kc_enctype));
+       return written;
 }
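
The snprintf() to scnprintf() switch above matters when the return value is treated as the number of bytes placed in the buffer: snprintf() returns the length the formatted string would have needed, while scnprintf() returns what was actually stored. A small illustration follows; the buffer size and enctype string are made up for the example.

#include <linux/kernel.h>

static void example_truncated_format(void)
{
	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "krb5 (%s)", "aes256-cts");
	/* n == 17: the length the full "krb5 (aes256-cts)" would need,
	 * even though only "krb5 (a" plus a NUL fit into buf */

	n = scnprintf(buf, sizeof(buf), "krb5 (%s)", "aes256-cts");
	/* n == 7: the number of characters actually stored, NUL excluded,
	 * which is the safe value to hand back as "bytes written" */
	(void)n;
}
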
 
 static struct gss_api_ops gss_kerberos_ops = {