LU-13344 gss: Update crypto to use sync_skcipher 86/38586/13
author Shaun Tancheff <shaun.tancheff@hpe.com>
Sun, 24 May 2020 19:29:41 +0000 (14:29 -0500)
committer Oleg Drokin <green@whamcloud.com>
Tue, 10 Nov 2020 02:06:10 +0000 (02:06 +0000)
As of linux v4.19-rc2-66-gb350bee5ea0f, the change
   crypto: skcipher - Introduce crypto_sync_skcipher

enabled the deprecation of blkcipher, which was dropped
as of linux v5.4-rc1-159-gc65058b7587f
    crypto: skcipher - remove the "blkcipher" algorithm type

Based on the existence of SYNC_SKCIPHER_REQUEST_ON_STACK,
use the sync_skcipher API on newer kernels, or provide
wrappers around the blkcipher API on older ones.
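
For reference, a minimal sketch of the sync_skcipher call sequence this
patch adopts on kernels that provide SYNC_SKCIPHER_REQUEST_ON_STACK
(v4.19+). The function name demo_encrypt and the "cbc(aes)" algorithm
below are illustrative only, not part of this patch; on older kernels
the wrapper macros added to gss_crypto.h map the same calls back onto
the blkcipher API.

    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>

    /* Illustrative only: in-place encryption of one linear buffer
     * (buf must be addressable through a scatterlist, i.e. not on
     * the stack). */
    static int demo_encrypt(const u8 *key, unsigned int keylen,
                            u8 *buf, unsigned int len, u8 *iv)
    {
            struct crypto_sync_skcipher *tfm;
            struct scatterlist sg;
            int rc;

            /* was crypto_alloc_blkcipher() */
            tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            rc = crypto_sync_skcipher_setkey(tfm, key, keylen);
            if (!rc) {
                    /* request lives on the stack, replacing the old
                     * struct blkcipher_desc */
                    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

                    skcipher_request_set_sync_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    sg_init_one(&sg, buf, len);
                    skcipher_request_set_crypt(req, &sg, &sg, len, iv);

                    rc = crypto_skcipher_encrypt(req);

                    /* wipe key material from the on-stack request,
                     * as the patch does after each operation */
                    skcipher_request_zero(req);
            }

            crypto_free_sync_skcipher(tfm);
            return rc;
    }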

Test-Parameters: testlist=sanity,recovery-small,sanity-sec mdscount=2 mdtcount=4 ostcount=8 clientcount=2 env=SHARED_KEY=true,SK_FLAVOR=skn
Test-Parameters: testlist=sanity,recovery-small,sanity-sec mdscount=2 mdtcount=4 ostcount=8 clientcount=2 env=SHARED_KEY=true,SK_FLAVOR=ska
Test-Parameters: testlist=sanity,recovery-small,sanity-sec mdscount=2 mdtcount=4 ostcount=8 clientcount=2 env=SHARED_KEY=true,SK_FLAVOR=ski
Test-Parameters: testlist=sanity,recovery-small,sanity-sec mdscount=2 mdtcount=4 ostcount=8 clientcount=2 env=SHARED_KEY=true,SK_FLAVOR=skpi
HPE-bug-id: LUS-8589
Signed-off-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Change-Id: I7683c20957213fd687ef5cf6dea64c842928db5b
Reviewed-on: https://review.whamcloud.com/38586
Reviewed-by: James Simmons <jsimmons@infradead.org>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Sebastien Buisson <sbuisson@ddn.com>
Reviewed-by: Petros Koutoupis <petros.koutoupis@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/ptlrpc/gss/gss_crypto.c
lustre/ptlrpc/gss/gss_crypto.h
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_sk_mech.c

lustre/ptlrpc/gss/gss_crypto.c
index 7d2e09d..a07fac7 100644
@@ -59,7 +59,7 @@ int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
 {
        int rc;
 
-       kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+       kb->kb_tfm = crypto_alloc_sync_skcipher(alg_name, alg_mode, 0);
        if (IS_ERR(kb->kb_tfm)) {
                rc = PTR_ERR(kb->kb_tfm);
                kb->kb_tfm = NULL;
@@ -68,8 +68,8 @@ int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
                return rc;
        }
 
-       rc = crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data,
-                                    kb->kb_key.len);
+       rc = crypto_sync_skcipher_setkey(kb->kb_tfm, kb->kb_key.data,
+                                        kb->kb_key.len);
        if (rc) {
                CERROR("failed to set %s key, len %d, rc = %d\n", alg_name,
                       kb->kb_key.len, rc);
@@ -83,7 +83,7 @@ void gss_keyblock_free(struct gss_keyblock *kb)
 {
        rawobj_free(&kb->kb_key);
        if (kb->kb_tfm)
-               crypto_free_blkcipher(kb->kb_tfm);
+               crypto_free_sync_skcipher(kb->kb_tfm);
 }
 
 int gss_keyblock_dup(struct gss_keyblock *new, struct gss_keyblock *kb)
@@ -225,33 +225,31 @@ void gss_teardown_sgtable(struct sg_table *sgt)
                sg_free_table(sgt);
 }
 
-int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
-                     const void *in, void *out, size_t length)
+int gss_crypt_generic(struct crypto_sync_skcipher *tfm, int decrypt,
+                     const void *iv, const void *in, void *out, size_t length)
 {
-       struct blkcipher_desc desc;
        struct scatterlist sg;
        struct sg_table sg_out;
        __u8 local_iv[16] = {0};
        __u32 ret = -EINVAL;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
        LASSERT(tfm);
-       desc.tfm = tfm;
-       desc.info = local_iv;
-       desc.flags = 0;
 
 
-       if (length % crypto_blkcipher_blocksize(tfm) != 0) {
+       if (length % crypto_sync_skcipher_blocksize(tfm) != 0) {
                CERROR("output length %zu mismatch blocksize %d\n",
-                      length, crypto_blkcipher_blocksize(tfm));
+                      length, crypto_sync_skcipher_blocksize(tfm));
                goto out;
        }
 
-       if (crypto_blkcipher_ivsize(tfm) > ARRAY_SIZE(local_iv)) {
-               CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
+       if (crypto_sync_skcipher_ivsize(tfm) > ARRAY_SIZE(local_iv)) {
+               CERROR("iv size too large %d\n",
+                       crypto_sync_skcipher_ivsize(tfm));
                goto out;
        }
 
        if (iv)
-               memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
+               memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
 
        if (in != out)
                memmove(out, in, length);
@@ -260,11 +258,16 @@ int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
        if (ret != 0)
                goto out;
 
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, length, local_iv);
+
        if (decrypt)
-               ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+               ret = crypto_skcipher_decrypt_iv(req, &sg, &sg, length);
        else
-               ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+               ret = crypto_skcipher_encrypt_iv(req, &sg, &sg, length);
 
 
+       skcipher_request_zero(req);
        gss_teardown_sgtable(&sg_out);
 out:
        return ret;
@@ -396,11 +399,10 @@ int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
        return 0;
 }
 
-int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
+int gss_crypt_rawobjs(struct crypto_sync_skcipher *tfm, __u8 *iv,
                      int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
                      int enc)
 {
-       struct blkcipher_desc desc;
        struct scatterlist src;
        struct scatterlist dst;
        struct sg_table sg_dst;
@@ -408,12 +410,13 @@ int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
        __u8 *buf;
        __u32 datalen = 0;
        int i, rc;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+
        ENTRY;
 
        buf = outobj->data;
-       desc.tfm  = tfm;
-       desc.info = iv;
-       desc.flags = 0;
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
 
        for (i = 0; i < inobj_cnt; i++) {
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
@@ -430,35 +433,30 @@ int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
                        RETURN(rc);
                }
 
-               if (iv) {
-                       if (enc)
-                               rc = crypto_blkcipher_encrypt_iv(&desc, &dst,
-                                                                &src,
-                                                                src.length);
-                       else
-                               rc = crypto_blkcipher_decrypt_iv(&desc, &dst,
-                                                                &src,
-                                                                src.length);
-               } else {
-                       if (enc)
-                               rc = crypto_blkcipher_encrypt(&desc, &dst, &src,
-                                                             src.length);
-                       else
-                               rc = crypto_blkcipher_decrypt(&desc, &dst, &src,
-                                                             src.length);
-               }
+               skcipher_request_set_crypt(req, &src, &dst, src.length, iv);
+               if (!iv)
+                       skcipher_request_set_crypt_iv(req);
+
+               if (enc)
+                       rc = crypto_skcipher_encrypt_iv(req, &dst, &src,
+                                                       src.length);
+               else
+                       rc = crypto_skcipher_decrypt_iv(req, &dst, &src,
+                                                       src.length);
 
                gss_teardown_sgtable(&sg_src);
                gss_teardown_sgtable(&sg_dst);
 
                if (rc) {
                        CERROR("encrypt error %d\n", rc);
+                       skcipher_request_zero(req);
                        RETURN(rc);
                }
 
                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }
+       skcipher_request_zero(req);
 
        outobj->len = datalen;
        RETURN(0);
lustre/ptlrpc/gss/gss_crypto.h
index 8e1061b..5dbc9f5 100644
@@ -5,9 +5,72 @@
 
 #include "gss_internal.h"
 
+#include <crypto/skcipher.h>
+
+/*
+ * linux v4.19-rc2-66-gb350bee5ea0f
+ * crypto: skcipher - Introduce crypto_sync_skcipher
+ *
+ * crypto_sync_skcipher will replace crypto_blkcipher so start using
+ * crypto_sync_skcipher and provide wrappers for older kernels
+ */
+#ifdef SYNC_SKCIPHER_REQUEST_ON_STACK
+
+#define crypto_skcipher_encrypt_iv(desc, dst, src, blocksize)          \
+       crypto_skcipher_encrypt((desc))
+
+#define crypto_skcipher_decrypt_iv(desc, dst, src, blocksize)          \
+       crypto_skcipher_decrypt((desc))
+
+#define skcipher_request_set_crypt_iv(d)
+
+#else /* ! SYNC_SKCIPHER_REQUEST_ON_STACK */
+
+#define        crypto_sync_skcipher            crypto_blkcipher
+
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm)                      \
+       struct blkcipher_desc __##name##_obj, *name = (void *)&__##name##_obj
+
+#define skcipher_request_set_sync_tfm(d, _tfm)                         \
+       do { (d)->tfm = _tfm; } while (0)
+
+#define skcipher_request_set_callback(d, f, c, data)                   \
+       do { (d)->flags = f; } while (0)
+
+#define skcipher_request_set_crypt(d, src, dst, cryptlen, iv)          \
+       do { (d)->info = iv; } while (0)
+
+#define skcipher_request_set_crypt_iv(d)                               \
+       do { (d)->info = crypto_blkcipher_crt((d)->tfm)->iv; } while (0)
+
+#define crypto_sync_skcipher_blocksize(tfm)                            \
+       crypto_blkcipher_blocksize((tfm))
+
+#define crypto_sync_skcipher_setkey(tfm, key, keylen)                  \
+       crypto_blkcipher_setkey((tfm), (key), (keylen))
+
+#define crypto_alloc_sync_skcipher(name, type, mask)                   \
+       crypto_alloc_blkcipher((name), (type), (mask))
+
+#define crypto_free_sync_skcipher(tfm)                                 \
+       crypto_free_blkcipher((tfm))
+
+#define crypto_sync_skcipher_ivsize(tfm)                               \
+       crypto_blkcipher_ivsize((tfm))
+
+#define crypto_skcipher_encrypt_iv(desc, dst, src, len)                        \
+       crypto_blkcipher_encrypt_iv((desc), (dst), (src), (len))
+
+#define crypto_skcipher_decrypt_iv(desc, dst, src, len)                        \
+       crypto_blkcipher_decrypt_iv((desc), (dst), (src), (len))
+
+#define skcipher_request_zero(req) /* nop */
+
+#endif /* SYNC_SKCIPHER_REQUEST_ON_STACK */
+
 struct gss_keyblock {
-       rawobj_t                 kb_key;
-       struct crypto_blkcipher *kb_tfm;
+       rawobj_t kb_key;
+       struct crypto_sync_skcipher *kb_tfm;
 };
 
 int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
@@ -21,8 +84,8 @@ int gss_get_keyblock(char **ptr, const char *end, struct gss_keyblock *kb,
 int gss_setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
                      const void *buf, unsigned int buf_len);
 void gss_teardown_sgtable(struct sg_table *sgt);
-int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
-                     const void *in, void *out, size_t length);
+int gss_crypt_generic(struct crypto_sync_skcipher *tfm, int decrypt,
+                     const void *iv, const void *in, void *out, size_t length);
 int gss_digest_hash(struct ahash_request *req, rawobj_t *hdr,
                    int msgcnt, rawobj_t *msgs, int iovcnt,
                    struct bio_vec *iovs);
@@ -30,7 +93,7 @@ int gss_digest_hash_compat(struct ahash_request *req,
                           rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
                           int iovcnt, struct bio_vec *iovs);
 int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
-int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
+int gss_crypt_rawobjs(struct crypto_sync_skcipher *tfm, __u8 *iv,
                      int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
                      int enc);
 
lustre/ptlrpc/gss/gss_krb5_mech.c
index 6ce908a..e0ddff9 100644
@@ -670,31 +670,27 @@ out:
  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
  */
 static
-int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
-                      struct krb5_header *khdr,
-                      char *confounder,
-                      struct ptlrpc_bulk_desc *desc,
-                      rawobj_t *cipher,
-                      int adj_nob)
+int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm,
+                     struct krb5_header *khdr,
+                     char *confounder,
+                     struct ptlrpc_bulk_desc *desc,
+                     rawobj_t *cipher,
+                     int adj_nob)
 {
-        struct blkcipher_desc   ciph_desc;
-        __u8                    local_iv[16] = {0};
-        struct scatterlist      src, dst;
-       struct sg_table         sg_src, sg_dst;
-        int                     blocksize, i, rc, nob = 0;
+       __u8 local_iv[16] = {0};
+       struct scatterlist src, dst;
+       struct sg_table sg_src, sg_dst;
+       int blocksize, i, rc, nob = 0;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
 
-        LASSERT(desc->bd_iov_count);
+       LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_vec);
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
-        LASSERT(blocksize > 1);
-        LASSERT(cipher->len == blocksize + sizeof(*khdr));
-
-        ciph_desc.tfm  = tfm;
-        ciph_desc.info = local_iv;
-        ciph_desc.flags = 0;
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
+       LASSERT(blocksize > 1);
+       LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
 
-        /* encrypt confounder */
+       /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;
@@ -704,20 +700,24 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                gss_teardown_sgtable(&sg_src);
                return rc;
        }
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  blocksize, local_iv);
 
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
-                                        sg_src.sgl, blocksize);
+       rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
 
-        if (rc) {
-                CERROR("error to encrypt confounder: %d\n", rc);
-                return rc;
-        }
+       if (rc) {
+               CERROR("error to encrypt confounder: %d\n", rc);
+               skcipher_request_zero(req);
+               return rc;
+       }
 
 
-        /* encrypt clear pages */
-        for (i = 0; i < desc->bd_iov_count; i++) {
+       /* encrypt clear pages */
+       for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, desc->bd_vec[i].bv_page,
                            (desc->bd_vec[i].bv_len +
@@ -733,28 +733,36 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                desc->bd_enc_vec[i].bv_offset = dst.offset;
                desc->bd_enc_vec[i].bv_len = dst.length;
 
-               rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
-                                                    src.length);
-                if (rc) {
-                        CERROR("error to encrypt page: %d\n", rc);
-                        return rc;
-                }
-        }
+               skcipher_request_set_crypt(req, &src, &dst,
+                                         src.length, local_iv);
+               rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length);
+               if (rc) {
+                       CERROR("error to encrypt page: %d\n", rc);
+                       skcipher_request_zero(req);
+                       return rc;
+               }
+       }
 
 
-        /* encrypt krb5 header */
+       /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
-       if (rc != 0)
+       if (rc != 0) {
+               skcipher_request_zero(req);
                return rc;
+       }
 
        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
+               skcipher_request_zero(req);
                return rc;
        }
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
-                                        sizeof(*khdr));
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  sizeof(*khdr), local_iv);
+       rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+                                       sizeof(*khdr));
+       skcipher_request_zero(req);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
 
  *   should have been done by prep_bulk().
  */
 static
  *   should have been done by prep_bulk().
-                      struct krb5_header *khdr,
-                      struct ptlrpc_bulk_desc *desc,
-                      rawobj_t *cipher,
-                      rawobj_t *plain,
-                      int adj_nob)
+int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm,
+                     struct krb5_header *khdr,
+                     struct ptlrpc_bulk_desc *desc,
+                     rawobj_t *cipher,
+                     rawobj_t *plain,
+                     int adj_nob)
 {
-        struct blkcipher_desc   ciph_desc;
-        __u8                    local_iv[16] = {0};
-        struct scatterlist      src, dst;
-       struct sg_table         sg_src, sg_dst;
-        int                     ct_nob = 0, pt_nob = 0;
-        int                     blocksize, i, rc;
-
-        LASSERT(desc->bd_iov_count);
-       LASSERT(desc->bd_enc_vec);
-        LASSERT(desc->bd_nob_transferred);
+       __u8 local_iv[16] = {0};
+       struct scatterlist src, dst;
+       struct sg_table sg_src, sg_dst;
+       int ct_nob = 0, pt_nob = 0;
+       int blocksize, i, rc;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
-        LASSERT(blocksize > 1);
-        LASSERT(cipher->len == blocksize + sizeof(*khdr));
+       LASSERT(desc->bd_iov_count);
+       LASSERT(desc->bd_enc_vec);
+       LASSERT(desc->bd_nob_transferred);
 
 
-        ciph_desc.tfm  = tfm;
-        ciph_desc.info = local_iv;
-        ciph_desc.flags = 0;
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
+       LASSERT(blocksize > 1);
+       LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
 
-        if (desc->bd_nob_transferred % blocksize) {
-                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
-                return -EPROTO;
-        }
+       if (desc->bd_nob_transferred % blocksize) {
+               CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+               return -EPROTO;
+       }
 
 
-        /* decrypt head (confounder) */
+       /* decrypt head (confounder) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;
@@ -831,27 +835,31 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                return rc;
        }
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
-                                        sg_src.sgl, blocksize);
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  blocksize, local_iv);
+
+       rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
 
-        if (rc) {
-                CERROR("error to decrypt confounder: %d\n", rc);
-                return rc;
-        }
+       if (rc) {
+               CERROR("error to decrypt confounder: %d\n", rc);
+               skcipher_request_zero(req);
+               return rc;
+       }
 
        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
-               if (desc->bd_enc_vec[i].bv_offset % blocksize
-                   != 0 ||
-                   desc->bd_enc_vec[i].bv_len % blocksize
-                   != 0) {
+               if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 ||
+                   desc->bd_enc_vec[i].bv_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_vec[i].bv_offset,
                               desc->bd_enc_vec[i].bv_len,
                               blocksize);
+                       skcipher_request_zero(req);
                        return -EFAULT;
                }
 
@@ -888,12 +896,14 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                        sg_assign_page(&dst,
                                       desc->bd_vec[i].bv_page);
 
-               rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
-                                                src.length);
-                if (rc) {
-                        CERROR("error to decrypt page: %d\n", rc);
-                        return rc;
-                }
+               skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                          src.length, local_iv);
+               rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length);
+               if (rc) {
+                       CERROR("error to decrypt page: %d\n", rc);
+                       skcipher_request_zero(req);
+                       return rc;
+               }
 
                if (desc->bd_vec[i].bv_len % blocksize != 0) {
                        memcpy(page_address(desc->bd_vec[i].bv_page) +
@@ -908,24 +918,26 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                pt_nob += desc->bd_vec[i].bv_len;
        }
 
-        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
-                CERROR("%d cipher text transferred but only %d decrypted\n",
-                       desc->bd_nob_transferred, ct_nob);
-                return -EFAULT;
-        }
+       if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+               CERROR("%d cipher text transferred but only %d decrypted\n",
+                      desc->bd_nob_transferred, ct_nob);
+               skcipher_request_zero(req);
+               return -EFAULT;
+       }
 
 
-        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
-                CERROR("%d plain text expected but only %d received\n",
-                       desc->bd_nob, pt_nob);
-                return -EFAULT;
-        }
+       if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+               CERROR("%d plain text expected but only %d received\n",
+                      desc->bd_nob, pt_nob);
+               skcipher_request_zero(req);
+               return -EFAULT;
+       }
 
        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_vec[i++].bv_len = 0;
 
-        /* decrypt tail (krb5 header) */
+       /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0)
@@ -938,23 +950,25 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                return rc;
        }
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
-                                        sizeof(*khdr));
-
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                 src.length, local_iv);
+       rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+                                       sizeof(*khdr));
        gss_teardown_sgtable(&sg_src);
        gss_teardown_sgtable(&sg_dst);
 
-        if (rc) {
-                CERROR("error to decrypt tail: %d\n", rc);
-                return rc;
-        }
+       skcipher_request_zero(req);
+       if (rc) {
+               CERROR("error to decrypt tail: %d\n", rc);
+               return rc;
+       }
 
 
-        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
-                CERROR("krb5 header doesn't match\n");
-                return -EACCES;
-        }
+       if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+               CERROR("krb5 header doesn't match\n");
+               return -EACCES;
+       }
 
 
-        return 0;
+       return 0;
 }
 
 static
@@ -979,7 +993,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
-               crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+               crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm));
 
        /*
         * final token format:
@@ -1003,7 +1017,8 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_sync_skcipher_blocksize(
+                                                       kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);
 
@@ -1051,7 +1066,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
-               struct crypto_blkcipher *arc4_tfm;
+               struct crypto_sync_skcipher *arc4_tfm;
 
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
@@ -1060,14 +1075,14 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
-                                           arc4_keye.len)) {
+               if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
@@ -1076,7 +1091,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
 arc4_out_tfm:
-               crypto_free_blkcipher(arc4_tfm);
+               crypto_free_sync_skcipher(arc4_tfm);
 arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
@@ -1112,7 +1127,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
        LASSERT(desc->bd_enc_vec);
        LASSERT(kctx->kc_keye.kb_tfm);
 
-       blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+       blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_vec[i].bv_page);
@@ -1144,7 +1159,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
-       int                  blocksize;
+       int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
@@ -1173,10 +1188,10 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
 
        /*
@@ -1184,9 +1199,9 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
-       LASSERT(blocksize <= ke->ke_conf_size);
-       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
-       LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+       LASSERT(blocksz <= ke->ke_conf_size);
+       LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
+       LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16);
 
        /*
         * clear text layout for checksum:
@@ -1221,7 +1236,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
        data_desc[0].len = ke->ke_conf_size;
 
        cipher.data = (__u8 *)(khdr + 1);
-       cipher.len = blocksize + sizeof(*khdr);
+       cipher.len = blocksz + sizeof(*khdr);
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
@@ -1257,7 +1272,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
-       int                  blocksize, bodysize;
+       int                  blocksz, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
@@ -1283,10 +1298,10 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
 
        /* expected token layout:
@@ -1296,7 +1311,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
 
-       if (bodysize % blocksize) {
+       if (bodysize % blocksz) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }
@@ -1326,7 +1341,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
-               struct crypto_blkcipher *arc4_tfm;
+               struct crypto_sync_skcipher *arc4_tfm;
 
                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;
@@ -1338,14 +1353,14 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                        GOTO(arc4_out, rc = -EACCES);
                }
 
-               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               if (crypto_blkcipher_setkey(arc4_tfm,
-                                           arc4_keye.data, arc4_keye.len)) {
+               if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
@@ -1354,7 +1369,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
 arc4_out_tfm:
-               crypto_free_blkcipher(arc4_tfm);
+               crypto_free_sync_skcipher(arc4_tfm);
 arc4_out_key:
                rawobj_free(&arc4_keye);
 arc4_out:
@@ -1425,7 +1440,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
-       int                  blocksize;
+       int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
@@ -1450,13 +1465,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
-       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+       LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
 
        /*
         * token format is expected as:
@@ -1464,14 +1479,14 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
-       if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+       if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }
 
        cipher.data = (__u8 *) (khdr + 1);
-       cipher.len = blocksize + sizeof(*khdr);
+       cipher.len = blocksz + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;
 
@@ -1487,7 +1502,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
-       data_desc[0].len = blocksize;
+       data_desc[0].len = blocksz;
 
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
@@ -1497,7 +1512,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);
 
-       if (memcmp(plain.data + blocksize + sizeof(*khdr),
+       if (memcmp(plain.data + blocksz + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
lustre/ptlrpc/gss/gss_sk_mech.c
index dbed77c..9406370 100644
@@ -511,7 +511,7 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
 
        LASSERT(skc->sc_session_kb.kb_tfm);
 
-       blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
+       blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
        if (gss_add_padding(message, message_buffer_length, blocksize))
                return GSS_S_FAILURE;
 
@@ -573,7 +573,7 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;
 
-       blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
+       blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
        if (skw.skw_cipher.len % blocksize != 0)
                return GSS_S_DEFECTIVE_TOKEN;
 
@@ -609,7 +609,7 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
        int i;
 
        LASSERT(skc->sc_session_kb.kb_tfm);
-       blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
+       blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_vec[i].bv_offset & blocksize) {
@@ -627,27 +627,26 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
        return GSS_S_COMPLETE;
 }
 
-static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
+static __u32 sk_encrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv,
                             struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
 {
-       struct blkcipher_desc cdesc = {
-               .tfm = tfm,
-               .info = iv,
-               .flags = 0,
-       };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int nob = 0;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
 
        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);
 
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_set_page(&ptxt, desc->bd_vec[i].bv_page,
                            sk_block_mask(desc->bd_vec[i].bv_len,
@@ -661,13 +660,15 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                desc->bd_enc_vec[i].bv_offset = ctxt.offset;
                desc->bd_enc_vec[i].bv_len = ctxt.length;
 
-               rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
-                                                ptxt.length);
+               skcipher_request_set_crypt(req, &ptxt, &ctxt, ptxt.length, iv);
+               rc = crypto_skcipher_encrypt_iv(req, &ctxt, &ptxt, ptxt.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
+                       skcipher_request_zero(req);
                        return rc;
                }
        }
+       skcipher_request_zero(req);
 
        if (adj_nob)
                desc->bd_nob = nob;
@@ -675,15 +676,10 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
        return 0;
 }
 
-static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
+static __u32 sk_decrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv,
                             struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
 {
-       struct blkcipher_desc cdesc = {
-               .tfm = tfm,
-               .info = iv,
-               .flags = 0,
-       };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
@@ -691,17 +687,21 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
        int rc;
        int pnob = 0;
        int cnob = 0;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
        if (desc->bd_nob_transferred % blocksize != 0) {
                CERROR("Transfer not a multiple of block size: %d\n",
                       desc->bd_nob_transferred);
                return GSS_S_DEFECTIVE_TOKEN;
        }
 
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+
        for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
             i++) {
                struct bio_vec *piov = &desc->bd_vec[i];
@@ -710,6 +710,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                if (ciov->bv_offset % blocksize != 0 ||
                    ciov->bv_len % blocksize != 0) {
                        CERROR("Invalid bulk descriptor vector\n");
+                       skcipher_request_zero(req);
                        return GSS_S_DEFECTIVE_TOKEN;
                }
 
@@ -733,6 +734,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                        if (ciov->bv_len + cnob > desc->bd_nob_transferred ||
                            piov->bv_len > ciov->bv_len) {
                                CERROR("Invalid decrypted length\n");
+                               skcipher_request_zero(req);
                                return GSS_S_FAILURE;
                        }
                }
@@ -751,10 +753,11 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                if (piov->bv_len % blocksize == 0)
                        sg_assign_page(&ptxt, piov->bv_page);
 
-               rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
-                                                ctxt.length);
+               skcipher_request_set_crypt(req, &ctxt, &ptxt, ptxt.length, iv);
+               rc = crypto_skcipher_decrypt_iv(req, &ptxt, &ctxt, ptxt.length);
                if (rc) {
                        CERROR("Decryption failed for page: %d\n", rc);
+                       skcipher_request_zero(req);
                        return GSS_S_FAILURE;
                }
 
@@ -769,6 +772,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                cnob += ciov->bv_len;
                pnob += piov->bv_len;
        }
+       skcipher_request_zero(req);
 
        /* if needed, clear up the rest unused iovs */
        if (adj_nob)