From: Shaun Tancheff
Date: Sun, 24 May 2020 19:29:41 +0000 (-0500)
Subject: LU-13344 gss: Update crypto to use sync_skcipher
X-Git-Tag: 2.12.7-RC1~46
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=1d67a1eb08c7b95e7d15fa72c360e4fe11f838d4;p=fs%2Flustre-release.git

LU-13344 gss: Update crypto to use sync_skcipher

As of linux v4.19-rc2-66-gb350bee5ea0f, the change
  crypto: skcipher - Introduce crypto_sync_skcipher
enabled the deprecation of blkcipher, which was dropped as of
linux v5.4-rc1-159-gc65058b7587f
  crypto: skcipher - remove the "blkcipher" algorithm type

Based on the existence of SYNC_SKCIPHER_REQUEST_ON_STACK, use the
sync_skcipher API directly; otherwise provide wrappers over the
blkcipher API for older kernels.

Lustre-change: https://review.whamcloud.com/38586
Lustre-commit: 0a65279121a5a0f5c8831dd2ebd6927a235a94c2

HPE-bug-id: LUS-8589
Signed-off-by: Shaun Tancheff
Change-Id: I7683c20957213fd687ef5cf6dea64c842928db5b
Reviewed-on: https://review.whamcloud.com/40994
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Andreas Dilger
Reviewed-by: Petros Koutoupis
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
diff --git a/lustre/ptlrpc/gss/gss_crypto.c b/lustre/ptlrpc/gss/gss_crypto.c
index 5aebc48..7be412d 100644
--- a/lustre/ptlrpc/gss/gss_crypto.c
+++ b/lustre/ptlrpc/gss/gss_crypto.c
@@ -60,7 +60,7 @@ int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
 {
 	int rc;
 
-	kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+	kb->kb_tfm = crypto_alloc_sync_skcipher(alg_name, alg_mode, 0);
 	if (IS_ERR(kb->kb_tfm)) {
 		rc = PTR_ERR(kb->kb_tfm);
 		kb->kb_tfm = NULL;
@@ -69,8 +69,8 @@ int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
 		return rc;
 	}
 
-	rc = crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data,
-				     kb->kb_key.len);
+	rc = crypto_sync_skcipher_setkey(kb->kb_tfm, kb->kb_key.data,
+					 kb->kb_key.len);
 	if (rc) {
 		CERROR("failed to set %s key, len %d, rc = %d\n",
 		       alg_name, kb->kb_key.len, rc);
@@ -84,7 +84,7 @@ void gss_keyblock_free(struct gss_keyblock *kb)
 {
 	rawobj_free(&kb->kb_key);
 	if (kb->kb_tfm)
-		crypto_free_blkcipher(kb->kb_tfm);
+		crypto_free_sync_skcipher(kb->kb_tfm);
 }
 
 int gss_keyblock_dup(struct gss_keyblock *new, struct gss_keyblock *kb)
@@ -226,33 +226,31 @@ void gss_teardown_sgtable(struct sg_table *sgt)
 	sg_free_table(sgt);
 }
 
-int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
-		      const void *in, void *out, size_t length)
+int gss_crypt_generic(struct crypto_sync_skcipher *tfm, int decrypt,
+		      const void *iv, const void *in, void *out, size_t length)
 {
-	struct blkcipher_desc desc;
 	struct scatterlist sg;
 	struct sg_table sg_out;
 	__u8 local_iv[16] = {0};
 	__u32 ret = -EINVAL;
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
 	LASSERT(tfm);
-	desc.tfm = tfm;
-	desc.info = local_iv;
-	desc.flags = 0;
 
-	if (length % crypto_blkcipher_blocksize(tfm) != 0) {
+	if (length % crypto_sync_skcipher_blocksize(tfm) != 0) {
 		CERROR("output length %zu mismatch blocksize %d\n",
-		       length, crypto_blkcipher_blocksize(tfm));
+		       length, crypto_sync_skcipher_blocksize(tfm));
 		goto out;
 	}
 
-	if (crypto_blkcipher_ivsize(tfm) > ARRAY_SIZE(local_iv)) {
-		CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
+	if (crypto_sync_skcipher_ivsize(tfm) > ARRAY_SIZE(local_iv)) {
+		CERROR("iv size too large %d\n",
+		       crypto_sync_skcipher_ivsize(tfm));
 		goto out;
 	}
 
 	if (iv)
-		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
 
 	if (in != out)
 		memmove(out, in, length);
@@ -261,11 +259,16 @@ int gss_crypt_generic(struct
crypto_blkcipher *tfm, int decrypt, const void *iv, if (ret != 0) goto out; + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, &sg, &sg, length, local_iv); + if (decrypt) - ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length); + ret = crypto_skcipher_decrypt_iv(req, &sg, &sg, length); else - ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length); + ret = crypto_skcipher_encrypt_iv(req, &sg, &sg, length); + skcipher_request_zero(req); gss_teardown_sgtable(&sg_out); out: return ret; @@ -397,11 +400,10 @@ int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize) return 0; } -int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv, +int gss_crypt_rawobjs(struct crypto_sync_skcipher *tfm, __u8 *iv, int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj, int enc) { - struct blkcipher_desc desc; struct scatterlist src; struct scatterlist dst; struct sg_table sg_dst; @@ -409,12 +411,13 @@ int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv, __u8 *buf; __u32 datalen = 0; int i, rc; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); + ENTRY; buf = outobj->data; - desc.tfm = tfm; - desc.info = iv; - desc.flags = 0; + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); for (i = 0; i < inobj_cnt; i++) { LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len); @@ -431,35 +434,30 @@ int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv, RETURN(rc); } - if (iv) { - if (enc) - rc = crypto_blkcipher_encrypt_iv(&desc, &dst, - &src, - src.length); - else - rc = crypto_blkcipher_decrypt_iv(&desc, &dst, - &src, - src.length); - } else { - if (enc) - rc = crypto_blkcipher_encrypt(&desc, &dst, &src, - src.length); - else - rc = crypto_blkcipher_decrypt(&desc, &dst, &src, - src.length); - } + skcipher_request_set_crypt(req, &src, &dst, src.length, iv); + if (!iv) + skcipher_request_set_crypt_iv(req); + + if (enc) + rc = crypto_skcipher_encrypt_iv(req, &dst, &src, + src.length); + else + rc = crypto_skcipher_decrypt_iv(req, &dst, &src, + src.length); gss_teardown_sgtable(&sg_src); gss_teardown_sgtable(&sg_dst); if (rc) { CERROR("encrypt error %d\n", rc); + skcipher_request_zero(req); RETURN(rc); } datalen += inobjs[i].len; buf += inobjs[i].len; } + skcipher_request_zero(req); outobj->len = datalen; RETURN(0); diff --git a/lustre/ptlrpc/gss/gss_crypto.h b/lustre/ptlrpc/gss/gss_crypto.h index 39a2b4e..7ed680a 100644 --- a/lustre/ptlrpc/gss/gss_crypto.h +++ b/lustre/ptlrpc/gss/gss_crypto.h @@ -5,9 +5,72 @@ #include "gss_internal.h" +#include + +/* + * linux v4.19-rc2-66-gb350bee5ea0f + * crypto: skcipher - Introduce crypto_sync_skcipher + * + * crypto_sync_skcipher will replace crypto_blkcipher so start using + * crypto_sync_skcipher and provide wrappers for older kernels + */ +#ifdef SYNC_SKCIPHER_REQUEST_ON_STACK + +#define crypto_skcipher_encrypt_iv(desc, dst, src, blocksize) \ + crypto_skcipher_encrypt((desc)) + +#define crypto_skcipher_decrypt_iv(desc, dst, src, blocksize) \ + crypto_skcipher_decrypt((desc)) + +#define skcipher_request_set_crypt_iv(d) + +#else /* ! 
SYNC_SKCIPHER_REQUEST_ON_STACK */ + +#define crypto_sync_skcipher crypto_blkcipher + +#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \ + struct blkcipher_desc __##name##_obj, *name = (void *)&__##name##_obj + +#define skcipher_request_set_sync_tfm(d, _tfm) \ + do { (d)->tfm = _tfm; } while (0) + +#define skcipher_request_set_callback(d, f, c, data) \ + do { (d)->flags = f; } while (0) + +#define skcipher_request_set_crypt(d, src, dst, cryptlen, iv) \ + do { (d)->info = iv; } while (0) + +#define skcipher_request_set_crypt_iv(d) \ + do { (d)->info = crypto_blkcipher_crt((d)->tfm)->iv; } while (0) + +#define crypto_sync_skcipher_blocksize(tfm) \ + crypto_blkcipher_blocksize((tfm)) + +#define crypto_sync_skcipher_setkey(tfm, key, keylen) \ + crypto_blkcipher_setkey((tfm), (key), (keylen)) + +#define crypto_alloc_sync_skcipher(name, type, mask) \ + crypto_alloc_blkcipher((name), (type), (mask)) + +#define crypto_free_sync_skcipher(tfm) \ + crypto_free_blkcipher((tfm)) + +#define crypto_sync_skcipher_ivsize(tfm) \ + crypto_blkcipher_ivsize((tfm)) + +#define crypto_skcipher_encrypt_iv(desc, dst, src, len) \ + crypto_blkcipher_encrypt_iv((desc), (dst), (src), (len)) + +#define crypto_skcipher_decrypt_iv(desc, dst, src, len) \ + crypto_blkcipher_decrypt_iv((desc), (dst), (src), (len)) + +#define skcipher_request_zero(req) /* nop */ + +#endif /* SYNC_SKCIPHER_REQUEST_ON_STACK */ + struct gss_keyblock { - rawobj_t kb_key; - struct crypto_blkcipher *kb_tfm; + rawobj_t kb_key; + struct crypto_sync_skcipher *kb_tfm; }; int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name, @@ -21,15 +84,15 @@ int gss_get_keyblock(char **ptr, const char *end, struct gss_keyblock *kb, int gss_setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg, const void *buf, unsigned int buf_len); void gss_teardown_sgtable(struct sg_table *sgt); -int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv, - const void *in, void *out, size_t length); +int gss_crypt_generic(struct crypto_sync_skcipher *tfm, int decrypt, + const void *iv, const void *in, void *out, size_t length); int gss_digest_hash(struct ahash_request *req, rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs); int gss_digest_hash_compat(struct ahash_request *req, rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs); int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize); -int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv, +int gss_crypt_rawobjs(struct crypto_sync_skcipher *tfm, __u8 *iv, int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj, int enc); diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c index 368fcc5..bd3a94b 100644 --- a/lustre/ptlrpc/gss/gss_krb5_mech.c +++ b/lustre/ptlrpc/gss/gss_krb5_mech.c @@ -669,32 +669,28 @@ out: * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size. 
*/ static -int krb5_encrypt_bulk(struct crypto_blkcipher *tfm, - struct krb5_header *khdr, - char *confounder, - struct ptlrpc_bulk_desc *desc, - rawobj_t *cipher, - int adj_nob) +int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm, + struct krb5_header *khdr, + char *confounder, + struct ptlrpc_bulk_desc *desc, + rawobj_t *cipher, + int adj_nob) { - struct blkcipher_desc ciph_desc; - __u8 local_iv[16] = {0}; - struct scatterlist src, dst; - struct sg_table sg_src, sg_dst; - int blocksize, i, rc, nob = 0; + __u8 local_iv[16] = {0}; + struct scatterlist src, dst; + struct sg_table sg_src, sg_dst; + int blocksize, i, rc, nob = 0; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); - LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_iov_count); LASSERT(GET_ENC_KIOV(desc)); - blocksize = crypto_blkcipher_blocksize(tfm); - LASSERT(blocksize > 1); - LASSERT(cipher->len == blocksize + sizeof(*khdr)); - - ciph_desc.tfm = tfm; - ciph_desc.info = local_iv; - ciph_desc.flags = 0; + blocksize = crypto_sync_skcipher_blocksize(tfm); + LASSERT(blocksize > 1); + LASSERT(cipher->len == blocksize + sizeof(*khdr)); - /* encrypt confounder */ + /* encrypt confounder */ rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize); if (rc != 0) return rc; @@ -704,20 +700,24 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm, gss_teardown_sgtable(&sg_src); return rc; } + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + blocksize, local_iv); - rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, - sg_src.sgl, blocksize); + rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize); gss_teardown_sgtable(&sg_dst); gss_teardown_sgtable(&sg_src); - if (rc) { - CERROR("error to encrypt confounder: %d\n", rc); - return rc; - } + if (rc) { + CERROR("error to encrypt confounder: %d\n", rc); + skcipher_request_zero(req); + return rc; + } - /* encrypt clear pages */ - for (i = 0; i < desc->bd_iov_count; i++) { + /* encrypt clear pages */ + for (i = 0; i < desc->bd_iov_count; i++) { sg_init_table(&src, 1); sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page, (BD_GET_KIOV(desc, i).kiov_len + @@ -733,28 +733,36 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm, BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset; BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length; - rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, - src.length); - if (rc) { - CERROR("error to encrypt page: %d\n", rc); - return rc; - } - } + skcipher_request_set_crypt(req, &src, &dst, + src.length, local_iv); + rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length); + if (rc) { + CERROR("error to encrypt page: %d\n", rc); + skcipher_request_zero(req); + return rc; + } + } - /* encrypt krb5 header */ + /* encrypt krb5 header */ rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr)); - if (rc != 0) + if (rc != 0) { + skcipher_request_zero(req); return rc; + } rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize, sizeof(*khdr)); if (rc != 0) { gss_teardown_sgtable(&sg_src); + skcipher_request_zero(req); return rc; } - rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl, - sizeof(*khdr)); + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + sizeof(*khdr), local_iv); + rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, + sizeof(*khdr)); + skcipher_request_zero(req); gss_teardown_sgtable(&sg_dst); gss_teardown_sgtable(&sg_src); @@ -789,39 +797,35 
@@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm, * should have been done by prep_bulk(). */ static -int krb5_decrypt_bulk(struct crypto_blkcipher *tfm, - struct krb5_header *khdr, - struct ptlrpc_bulk_desc *desc, - rawobj_t *cipher, - rawobj_t *plain, - int adj_nob) +int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm, + struct krb5_header *khdr, + struct ptlrpc_bulk_desc *desc, + rawobj_t *cipher, + rawobj_t *plain, + int adj_nob) { - struct blkcipher_desc ciph_desc; - __u8 local_iv[16] = {0}; - struct scatterlist src, dst; - struct sg_table sg_src, sg_dst; - int ct_nob = 0, pt_nob = 0; - int blocksize, i, rc; + __u8 local_iv[16] = {0}; + struct scatterlist src, dst; + struct sg_table sg_src, sg_dst; + int ct_nob = 0, pt_nob = 0; + int blocksize, i, rc; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); - LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_iov_count); LASSERT(GET_ENC_KIOV(desc)); - LASSERT(desc->bd_nob_transferred); + LASSERT(desc->bd_nob_transferred); - blocksize = crypto_blkcipher_blocksize(tfm); - LASSERT(blocksize > 1); - LASSERT(cipher->len == blocksize + sizeof(*khdr)); + blocksize = crypto_sync_skcipher_blocksize(tfm); + LASSERT(blocksize > 1); + LASSERT(cipher->len == blocksize + sizeof(*khdr)); - ciph_desc.tfm = tfm; - ciph_desc.info = local_iv; - ciph_desc.flags = 0; - - if (desc->bd_nob_transferred % blocksize) { - CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred); - return -EPROTO; - } + if (desc->bd_nob_transferred % blocksize) { + CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred); + return -EPROTO; + } - /* decrypt head (confounder) */ + /* decrypt head (confounder) */ rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize); if (rc != 0) return rc; @@ -832,27 +836,31 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm, return rc; } - rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, - sg_src.sgl, blocksize); + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + blocksize, local_iv); + + rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize); gss_teardown_sgtable(&sg_dst); gss_teardown_sgtable(&sg_src); - if (rc) { - CERROR("error to decrypt confounder: %d\n", rc); - return rc; - } + if (rc) { + CERROR("error to decrypt confounder: %d\n", rc); + skcipher_request_zero(req); + return rc; + } for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred; i++) { - if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize - != 0 || - BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize - != 0) { + if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize != 0 || + BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize != 0) { CERROR("page %d: odd offset %u len %u, blocksize %d\n", i, BD_GET_ENC_KIOV(desc, i).kiov_offset, BD_GET_ENC_KIOV(desc, i).kiov_len, blocksize); + skcipher_request_zero(req); return -EFAULT; } @@ -889,12 +897,14 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm, sg_assign_page(&dst, BD_GET_KIOV(desc, i).kiov_page); - rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, - src.length); - if (rc) { - CERROR("error to decrypt page: %d\n", rc); - return rc; - } + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + src.length, local_iv); + rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length); + if (rc) { + CERROR("error to decrypt page: %d\n", rc); + skcipher_request_zero(req); + return rc; + } if (BD_GET_KIOV(desc, i).kiov_len % blocksize 
!= 0) { memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) + @@ -909,24 +919,26 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm, pt_nob += BD_GET_KIOV(desc, i).kiov_len; } - if (unlikely(ct_nob != desc->bd_nob_transferred)) { - CERROR("%d cipher text transferred but only %d decrypted\n", - desc->bd_nob_transferred, ct_nob); - return -EFAULT; - } + if (unlikely(ct_nob != desc->bd_nob_transferred)) { + CERROR("%d cipher text transferred but only %d decrypted\n", + desc->bd_nob_transferred, ct_nob); + skcipher_request_zero(req); + return -EFAULT; + } - if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) { - CERROR("%d plain text expected but only %d received\n", - desc->bd_nob, pt_nob); - return -EFAULT; - } + if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) { + CERROR("%d plain text expected but only %d received\n", + desc->bd_nob, pt_nob); + skcipher_request_zero(req); + return -EFAULT; + } /* if needed, clear up the rest unused iovs */ if (adj_nob) while (i < desc->bd_iov_count) BD_GET_KIOV(desc, i++).kiov_len = 0; - /* decrypt tail (krb5 header) */ + /* decrypt tail (krb5 header) */ rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize, sizeof(*khdr)); if (rc != 0) @@ -939,23 +951,25 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm, return rc; } - rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl, - sizeof(*khdr)); - + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + src.length, local_iv); + rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl, + sizeof(*khdr)); gss_teardown_sgtable(&sg_src); gss_teardown_sgtable(&sg_dst); - if (rc) { - CERROR("error to decrypt tail: %d\n", rc); - return rc; - } + skcipher_request_zero(req); + if (rc) { + CERROR("error to decrypt tail: %d\n", rc); + return rc; + } - if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) { - CERROR("krb5 header doesn't match\n"); - return -EACCES; - } + if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) { + CERROR("krb5 header doesn't match\n"); + return -EACCES; + } - return 0; + return 0; } static @@ -980,7 +994,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); LASSERT(kctx->kc_keye.kb_tfm == NULL || ke->ke_conf_size >= - crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm)); + crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm)); /* * final token format: @@ -1004,7 +1018,8 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, blocksize = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksize = crypto_sync_skcipher_blocksize( + kctx->kc_keye.kb_tfm); } LASSERT(blocksize <= ke->ke_conf_size); @@ -1052,7 +1067,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { rawobj_t arc4_keye = RAWOBJ_EMPTY; - struct crypto_blkcipher *arc4_tfm; + struct crypto_sync_skcipher *arc4_tfm; if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, NULL, 1, &cksum, 0, NULL, &arc4_keye, @@ -1061,14 +1076,14 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, GOTO(arc4_out_key, rc = -EACCES); } - arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0); + arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); if (IS_ERR(arc4_tfm)) { CERROR("failed to alloc tfm arc4 in ECB mode\n"); GOTO(arc4_out_key, rc = -EACCES); } - if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data, - arc4_keye.len)) { + if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data, + arc4_keye.len)) { CERROR("failed to set arc4 key, len %d\n", arc4_keye.len); 
GOTO(arc4_out_tfm, rc = -EACCES); @@ -1077,7 +1092,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc, &cipher, 1); arc4_out_tfm: - crypto_free_blkcipher(arc4_tfm); + crypto_free_sync_skcipher(arc4_tfm); arc4_out_key: rawobj_free(&arc4_keye); } else { @@ -1114,7 +1129,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx, LASSERT(GET_ENC_KIOV(desc)); LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); for (i = 0; i < desc->bd_iov_count; i++) { LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page); @@ -1146,7 +1161,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; - int blocksize; + int blocksz; rawobj_t cksum = RAWOBJ_EMPTY; rawobj_t data_desc[1], cipher; __u8 conf[GSS_MAX_CIPHER_BLOCK]; @@ -1176,10 +1191,10 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, * a tfm, currently only for arcfour-hmac */ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { LASSERT(kctx->kc_keye.kb_tfm == NULL); - blocksize = 1; + blocksz = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); } /* @@ -1187,9 +1202,9 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, * the bulk token size would be exactly (sizeof(krb5_header) + * blocksize + sizeof(krb5_header) + hashsize) */ - LASSERT(blocksize <= ke->ke_conf_size); - LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0); - LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16); + LASSERT(blocksz <= ke->ke_conf_size); + LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0); + LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16); /* * clear text layout for checksum: @@ -1224,7 +1239,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, data_desc[0].len = ke->ke_conf_size; cipher.data = (__u8 *)(khdr + 1); - cipher.len = blocksize + sizeof(*khdr); + cipher.len = blocksz + sizeof(*khdr); if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { LBUG(); @@ -1260,7 +1275,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; unsigned char *tmpbuf; - int blocksize, bodysize; + int blocksz, bodysize; rawobj_t cksum = RAWOBJ_EMPTY; rawobj_t cipher_in, plain_out; rawobj_t hash_objs[3]; @@ -1286,10 +1301,10 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, /* block size */ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { LASSERT(kctx->kc_keye.kb_tfm == NULL); - blocksize = 1; + blocksz = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); } /* expected token layout: @@ -1299,7 +1314,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, */ bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size; - if (bodysize % blocksize) { + if (bodysize % blocksz) { CERROR("odd bodysize %d\n", bodysize); return GSS_S_DEFECTIVE_TOKEN; } @@ -1329,7 +1344,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { rawobj_t arc4_keye; - struct crypto_blkcipher *arc4_tfm; + struct crypto_sync_skcipher *arc4_tfm; cksum.data = token->data + token->len - ke->ke_hash_size; cksum.len = ke->ke_hash_size; @@ 
-1341,14 +1356,14 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, GOTO(arc4_out, rc = -EACCES); } - arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0); + arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); if (IS_ERR(arc4_tfm)) { CERROR("failed to alloc tfm arc4 in ECB mode\n"); GOTO(arc4_out_key, rc = -EACCES); } - if (crypto_blkcipher_setkey(arc4_tfm, - arc4_keye.data, arc4_keye.len)) { + if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data, + arc4_keye.len)) { CERROR("failed to set arc4 key, len %d\n", arc4_keye.len); GOTO(arc4_out_tfm, rc = -EACCES); @@ -1357,7 +1372,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in, &plain_out, 0); arc4_out_tfm: - crypto_free_blkcipher(arc4_tfm); + crypto_free_sync_skcipher(arc4_tfm); arc4_out_key: rawobj_free(&arc4_keye); arc4_out: @@ -1428,7 +1443,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; - int blocksize; + int blocksz; rawobj_t cksum = RAWOBJ_EMPTY; rawobj_t cipher, plain; rawobj_t data_desc[1]; @@ -1454,13 +1469,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, /* block size */ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { LASSERT(kctx->kc_keye.kb_tfm == NULL); - blocksize = 1; + blocksz = 1; LBUG(); } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); } - LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0); + LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0); /* * token format is expected as: @@ -1468,14 +1483,14 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, * | krb5 header | head/tail cipher text | cksum | * ----------------------------------------------- */ - if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) + + if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) + ke->ke_hash_size) { CERROR("short token size: %u\n", token->len); return GSS_S_DEFECTIVE_TOKEN; } cipher.data = (__u8 *) (khdr + 1); - cipher.len = blocksize + sizeof(*khdr); + cipher.len = blocksz + sizeof(*khdr); plain.data = cipher.data; plain.len = cipher.len; @@ -1491,7 +1506,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, * ------------------------------------------ */ data_desc[0].data = plain.data; - data_desc[0].len = blocksize; + data_desc[0].len = blocksz; if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, khdr, 1, data_desc, @@ -1501,7 +1516,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, return GSS_S_FAILURE; LASSERT(cksum.len >= ke->ke_hash_size); - if (memcmp(plain.data + blocksize + sizeof(*khdr), + if (memcmp(plain.data + blocksz + sizeof(*khdr), cksum.data + cksum.len - ke->ke_hash_size, ke->ke_hash_size)) { CERROR("checksum mismatch\n"); diff --git a/lustre/ptlrpc/gss/gss_sk_mech.c b/lustre/ptlrpc/gss/gss_sk_mech.c index 862ab89..69e92bc 100644 --- a/lustre/ptlrpc/gss/gss_sk_mech.c +++ b/lustre/ptlrpc/gss/gss_sk_mech.c @@ -511,7 +511,7 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, LASSERT(skc->sc_session_kb.kb_tfm); - blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); + blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm); if (gss_add_padding(message, message_buffer_length, blocksize)) return GSS_S_FAILURE; @@ -573,7 +573,7 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t 
*gss_header, skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; skw.skw_hmac.len = sht_bytes; - blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); + blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm); if (skw.skw_cipher.len % blocksize != 0) return GSS_S_DEFECTIVE_TOKEN; @@ -609,7 +609,7 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context, int i; LASSERT(skc->sc_session_kb.kb_tfm); - blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); + blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm); for (i = 0; i < desc->bd_iov_count; i++) { if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) { @@ -627,27 +627,26 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context, return GSS_S_COMPLETE; } -static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, +static __u32 sk_encrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv, struct ptlrpc_bulk_desc *desc, rawobj_t *cipher, int adj_nob) { - struct blkcipher_desc cdesc = { - .tfm = tfm, - .info = iv, - .flags = 0, - }; struct scatterlist ptxt; struct scatterlist ctxt; int blocksize; int i; int rc; int nob = 0; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); - blocksize = crypto_blkcipher_blocksize(tfm); + blocksize = crypto_sync_skcipher_blocksize(tfm); sg_init_table(&ptxt, 1); sg_init_table(&ctxt, 1); + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + for (i = 0; i < desc->bd_iov_count; i++) { sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page, sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, @@ -661,13 +660,15 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset; BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length; - rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt, - ptxt.length); + skcipher_request_set_crypt(req, &ptxt, &ctxt, ptxt.length, iv); + rc = crypto_skcipher_encrypt_iv(req, &ctxt, &ptxt, ptxt.length); if (rc) { CERROR("failed to encrypt page: %d\n", rc); + skcipher_request_zero(req); return rc; } } + skcipher_request_zero(req); if (adj_nob) desc->bd_nob = nob; @@ -675,15 +676,10 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, return 0; } -static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, +static __u32 sk_decrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv, struct ptlrpc_bulk_desc *desc, rawobj_t *cipher, int adj_nob) { - struct blkcipher_desc cdesc = { - .tfm = tfm, - .info = iv, - .flags = 0, - }; struct scatterlist ptxt; struct scatterlist ctxt; int blocksize; @@ -691,17 +687,21 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, int rc; int pnob = 0; int cnob = 0; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); sg_init_table(&ptxt, 1); sg_init_table(&ctxt, 1); - blocksize = crypto_blkcipher_blocksize(tfm); + blocksize = crypto_sync_skcipher_blocksize(tfm); if (desc->bd_nob_transferred % blocksize != 0) { CERROR("Transfer not a multiple of block size: %d\n", desc->bd_nob_transferred); return GSS_S_DEFECTIVE_TOKEN; } + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred; i++) { lnet_kiov_t *piov = &BD_GET_KIOV(desc, i); @@ -710,6 +710,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, if (ciov->kiov_offset % blocksize != 0 || ciov->kiov_len % blocksize != 0) { CERROR("Invalid bulk descriptor vector\n"); + skcipher_request_zero(req); return 
GSS_S_DEFECTIVE_TOKEN; } @@ -733,6 +734,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, if (ciov->kiov_len + cnob > desc->bd_nob_transferred || piov->kiov_len > ciov->kiov_len) { CERROR("Invalid decrypted length\n"); + skcipher_request_zero(req); return GSS_S_FAILURE; } } @@ -751,10 +753,11 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, if (piov->kiov_len % blocksize == 0) sg_assign_page(&ptxt, piov->kiov_page); - rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt, - ctxt.length); + skcipher_request_set_crypt(req, &ctxt, &ptxt, ptxt.length, iv); + rc = crypto_skcipher_decrypt_iv(req, &ptxt, &ctxt, ptxt.length); if (rc) { CERROR("Decryption failed for page: %d\n", rc); + skcipher_request_zero(req); return GSS_S_FAILURE; } @@ -769,6 +772,7 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, cnob += ciov->kiov_len; pnob += piov->kiov_len; } + skcipher_request_zero(req); /* if needed, clear up the rest unused iovs */ if (adj_nob)
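
Note on the calling convention (not part of the patch): with the compatibility
macros added in gss_crypto.h, every call site is written against the
sync_skcipher interface and degrades to blkcipher on pre-4.19 kernels. The
sketch below is only an illustration of that interface on a v4.19+ kernel; the
transform name "cbc(aes)" and the helper demo_sync_skcipher_encrypt() are
made-up names that do not appear in the patch.

/*
 * Illustrative sketch only -- not from the patch. Shows the plain
 * sync_skcipher calling pattern the gss_crypto.h wrappers emulate,
 * assuming a kernel that defines SYNC_SKCIPHER_REQUEST_ON_STACK
 * (v4.19-rc2 or later).
 */
#include <linux/types.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int demo_sync_skcipher_encrypt(const u8 *key, unsigned int keylen,
				       u8 *buf, unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	u8 iv[16] = { 0 };
	int rc;

	/* For CBC, len must be a multiple of the cipher block size. */
	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (rc == 0) {
		/* The request lives on the stack; no per-call allocation. */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		/* Encrypt buf in place through a single scatterlist entry. */
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		rc = crypto_skcipher_encrypt(req);

		/* Clear any key material left in the on-stack request. */
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return rc;
}

The Lustre call sites differ only in that they go through the
crypto_skcipher_encrypt_iv()/crypto_skcipher_decrypt_iv() macros from
gss_crypto.h, which collapse to crypto_skcipher_encrypt()/crypto_skcipher_decrypt()
on new kernels and map to the blkcipher *_iv helpers on older ones.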