From: James Simmons
Date: Tue, 17 Sep 2013 17:50:59 +0000 (-0400)
Subject: LU-2800 compat: remove crypto shims
X-Git-Tag: 2.5.52~39
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=8b5efe94bc8d3a0314fd106c9a1c7859f3549737;ds=sidebyside

LU-2800 compat: remove crypto shims

Now that we've removed the tests for the crypto APIs, we can remove
all of the ll_* prefixed compatibility shims from the rest of the code.

Change-Id: I9a10b57d48614f892dd61f31ee4bee8fbea93ea8
Signed-off-by: James Simmons
Signed-off-by: Jeff Mahoney
Signed-off-by: Thomas Stibor
Reviewed-on: http://review.whamcloud.com/5347
Tested-by: Jenkins
Reviewed-by: Bob Glossman
Tested-by: Maloo
Reviewed-by: Andreas Dilger
---

diff --git a/lustre/include/linux/lustre_compat25.h b/lustre/include/linux/lustre_compat25.h
index 3e52025..880e6eb 100644
--- a/lustre/include/linux/lustre_compat25.h
+++ b/lustre/include/linux/lustre_compat25.h
@@ -154,38 +154,7 @@ static inline struct file *ll_dentry_open(struct path *path, int flags,
 
 /* add a lustre compatible layer for crypto API */
 #include <linux/crypto.h>
-#define ll_crypto_hash                          crypto_hash
-#define ll_crypto_cipher                        crypto_blkcipher
-#define ll_crypto_alloc_hash(name, type, mask)  crypto_alloc_hash(name, type, mask)
-#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
-#define ll_crypto_hash_init(desc)               crypto_hash_init(desc)
-#define ll_crypto_hash_update(desc, sl, bytes)  crypto_hash_update(desc, sl, bytes)
-#define ll_crypto_hash_final(desc, out)         crypto_hash_final(desc, out)
-#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
-        crypto_blkcipher_setkey(tfm, key, keylen)
-#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
-        crypto_blkcipher_set_iv(tfm, src, len)
-#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
-        crypto_blkcipher_get_iv(tfm, dst, len)
-#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
-        crypto_blkcipher_encrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
-        crypto_blkcipher_decrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
-        crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
-        crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
-
-static inline
-struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char *name,
-                                                   u32 type, u32 mask)
-{
-        struct ll_crypto_cipher *rtn = crypto_alloc_blkcipher(name, type, mask);
-
-        return (rtn == NULL ? ERR_PTR(-ENOMEM) : rtn);
-}
-
-static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
+static inline int ll_crypto_hmac(struct crypto_hash *tfm,
                                  u8 *key, unsigned int *keylen,
                                  struct scatterlist *sg,
                                  unsigned int size, u8 *result)
@@ -201,24 +170,13 @@ static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
         }
         return crypto_hash_digest(&desc, sg, size, result);
 }
-static inline
-unsigned int ll_crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
-{
-        return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
-}
+
 static inline
 unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
 {
         return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
 }
 
-#define ll_crypto_hash_blocksize(tfm)       crypto_hash_blocksize(tfm)
-#define ll_crypto_hash_digestsize(tfm)      crypto_hash_digestsize(tfm)
-#define ll_crypto_blkcipher_ivsize(tfm)     crypto_blkcipher_ivsize(tfm)
-#define ll_crypto_blkcipher_blocksize(tfm)  crypto_blkcipher_blocksize(tfm)
-#define ll_crypto_free_hash(tfm)            crypto_free_hash(tfm)
-#define ll_crypto_free_blkcipher(tfm)       crypto_free_blkcipher(tfm)
-
 #ifdef for_each_possible_cpu
 #define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
 #elif defined(for_each_cpu)
diff --git a/lustre/obdclass/capa.c b/lustre/obdclass/capa.c
index 3872d85..8f67dc7 100644
--- a/lustre/obdclass/capa.c
+++ b/lustre/obdclass/capa.c
@@ -245,7 +245,7 @@ EXPORT_SYMBOL(capa_lookup);
 
 int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 {
-        struct ll_crypto_hash *tfm;
+        struct crypto_hash *tfm;
         struct capa_hmac_alg *alg;
         int keylen;
         struct scatterlist sl;
@@ -257,7 +257,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 
         alg = &capa_hmac_algs[capa_alg(capa)];
 
-        tfm = ll_crypto_alloc_hash(alg->ha_name, 0, 0);
+        tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
         if (!tfm) {
                 CERROR("crypto_alloc_tfm failed, check whether your kernel"
                        "has crypto support!\n");
@@ -270,7 +270,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
                       (unsigned long)(capa) % PAGE_CACHE_SIZE);
 
         ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
-        ll_crypto_free_hash(tfm);
+        crypto_free_hash(tfm);
 
         return 0;
 }
@@ -278,7 +278,7 @@ EXPORT_SYMBOL(capa_hmac);
 
 int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 {
-        struct ll_crypto_cipher *tfm;
+        struct crypto_blkcipher *tfm;
         struct scatterlist sd;
         struct scatterlist ss;
         struct blkcipher_desc desc;
@@ -289,7 +289,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 
         /* passing "aes" in a variable instead of a constant string keeps gcc
          * 4.3.2 happy */
-        tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+        tfm = crypto_alloc_blkcipher(alg, 0, 0 );
         if (IS_ERR(tfm)) {
                 CERROR("failed to load transform for aes\n");
                 RETURN(PTR_ERR(tfm));
@@ -301,7 +301,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
                 GOTO(out, rc = -EINVAL);
         }
 
-        rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+        rc = crypto_blkcipher_setkey(tfm, key, min);
         if (rc) {
                 CERROR("failed to setting key for aes\n");
                 GOTO(out, rc);
@@ -315,7 +315,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
         desc.tfm   = tfm;
         desc.info  = NULL;
         desc.flags = 0;
-        rc = ll_crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+        rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
         if (rc) {
                 CERROR("failed to encrypt for aes\n");
                 GOTO(out, rc);
@@ -324,14 +324,14 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
         EXIT;
 
 out:
-        ll_crypto_free_blkcipher(tfm);
+        crypto_free_blkcipher(tfm);
         return rc;
 }
 EXPORT_SYMBOL(capa_encrypt_id);
 
 int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 {
-        struct ll_crypto_cipher *tfm;
+        struct crypto_blkcipher *tfm;
         struct scatterlist sd;
         struct scatterlist ss;
         struct blkcipher_desc desc;
@@ -342,7 +342,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 
         /* passing "aes" in a variable instead of a constant string keeps gcc
          * 4.3.2 happy */
-        tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+        tfm = crypto_alloc_blkcipher(alg, 0, 0 );
         if (IS_ERR(tfm)) {
                 CERROR("failed to load transform for aes\n");
                 RETURN(PTR_ERR(tfm));
@@ -354,7 +354,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
                 GOTO(out, rc = -EINVAL);
         }
 
-        rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+        rc = crypto_blkcipher_setkey(tfm, key, min);
         if (rc) {
                 CERROR("failed to setting key for aes\n");
                 GOTO(out, rc);
@@ -369,7 +369,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
         desc.tfm   = tfm;
         desc.info  = NULL;
         desc.flags = 0;
-        rc = ll_crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+        rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
         if (rc) {
                 CERROR("failed to decrypt for aes\n");
                 GOTO(out, rc);
@@ -378,7 +378,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
         EXIT;
 
 out:
-        ll_crypto_free_blkcipher(tfm);
+        crypto_free_blkcipher(tfm);
         return rc;
 }
 EXPORT_SYMBOL(capa_decrypt_id);
diff --git a/lustre/ptlrpc/gss/gss_krb5.h b/lustre/ptlrpc/gss/gss_krb5.h
index 2493bbb..01edf15 100644
--- a/lustre/ptlrpc/gss/gss_krb5.h
+++ b/lustre/ptlrpc/gss/gss_krb5.h
@@ -74,8 +74,8 @@ struct krb5_header {
 };
 
 struct krb5_keyblock {
-        rawobj_t                 kb_key;
-        struct ll_crypto_cipher *kb_tfm;
+        rawobj_t                 kb_key;
+        struct crypto_blkcipher *kb_tfm;
 };
 
 struct krb5_ctx {
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 7f9a0ab..bd1114f 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -151,14 +151,14 @@ static const char * enctype2str(__u32 enctype)
 static
 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 {
-        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+        kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
         if (IS_ERR(kb->kb_tfm)) {
                 CERROR("failed to alloc tfm: %s, mode %d\n",
                        alg_name, alg_mode);
                 return -1;
         }
 
-        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+        if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                 CERROR("failed to set %s key, len %d\n",
                        alg_name, kb->kb_key.len);
                 return -1;
@@ -201,7 +201,7 @@ void keyblock_free(struct krb5_keyblock *kb)
 {
         rawobj_free(&kb->kb_key);
         if (kb->kb_tfm)
-                ll_crypto_free_blkcipher(kb->kb_tfm);
+                crypto_free_blkcipher(kb->kb_tfm);
 }
 
 static
@@ -533,7 +533,7 @@ void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 }
 
 static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
                    int decrypt,
                    void * iv,
                    void * in,
@@ -550,34 +550,34 @@ __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
         desc.info = local_iv;
         desc.flags= 0;
 
-        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+        if (length % crypto_blkcipher_blocksize(tfm) != 0) {
                 CERROR("output length %d mismatch blocksize %d\n",
-                       length, ll_crypto_blkcipher_blocksize(tfm));
+                       length, crypto_blkcipher_blocksize(tfm));
                 goto out;
         }
 
-        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
-                CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+        if (crypto_blkcipher_ivsize(tfm) > 16) {
+                CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
                 goto out;
         }
 
         if (iv)
-                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
         memcpy(out, in, length);
         buf_to_sg(&sg, out, length);
 
         if (decrypt)
-                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+                ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
         else
-                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+                ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
 
 out:
         return(ret);
 }
 
 static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
                      rawobj_t *key,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
@@ -588,17 +588,17 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         struct scatterlist sg[1];
         int i;
 
-        ll_crypto_hash_setkey(tfm, key->data, key->len);
+        crypto_hash_setkey(tfm, key->data, key->len);
         desc.tfm  = tfm;
         desc.flags= 0;
 
-        ll_crypto_hash_init(&desc);
+        crypto_hash_init(&desc);
 
         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
+                crypto_hash_update(&desc, sg, msgs[i].len);
         }
 
         for (i = 0; i < iovcnt; i++) {
@@ -607,19 +607,19 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                             iovs[i].kiov_offset);
-                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
         if (khdr) {
                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+                crypto_hash_update(&desc, sg, sizeof(*khdr));
         }
 
-        return ll_crypto_hash_final(&desc, cksum->data);
+        return crypto_hash_final(&desc, cksum->data);
 }
 
 static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
                      struct krb5_keyblock *kb,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
@@ -634,13 +634,13 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
         desc.tfm  = tfm;
         desc.flags= 0;
 
-        ll_crypto_hash_init(&desc);
+        crypto_hash_init(&desc);
 
         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
+                crypto_hash_update(&desc, sg, msgs[i].len);
         }
 
         for (i = 0; i < iovcnt; i++) {
@@ -649,15 +649,15 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
                 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                             iovs[i].kiov_offset);
-                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
         if (khdr) {
                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+                crypto_hash_update(&desc, sg, sizeof(*khdr));
         }
 
-        ll_crypto_hash_final(&desc, cksum->data);
+        crypto_hash_final(&desc, cksum->data);
 
         return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                             cksum->data, cksum->len);
@@ -676,16 +676,16 @@ __s32 krb5_make_checksum(__u32 enctype,
                          rawobj_t *cksum)
 {
         struct krb5_enctype   *ke = &enctypes[enctype];
-        struct ll_crypto_hash *tfm;
+        struct crypto_hash    *tfm;
         __u32                  code = GSS_S_FAILURE;
         int                    rc;
 
-        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+        if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                 return GSS_S_FAILURE;
         }
 
-        cksum->len = ll_crypto_hash_digestsize(tfm);
+        cksum->len = crypto_hash_digestsize(tfm);
         OBD_ALLOC_LARGE(cksum->data, cksum->len);
         if (!cksum->data) {
                 cksum->len = 0;
@@ -702,7 +702,7 @@ __s32 krb5_make_checksum(__u32 enctype,
         if (rc == 0)
                 code = GSS_S_COMPLETE;
 out_tfm:
-        ll_crypto_free_hash(tfm);
+        crypto_free_hash(tfm);
 
         return code;
 }
@@ -880,7 +880,7 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
 }
 
 static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
                          int mode_ecb,
                          int inobj_cnt,
                          rawobj_t *inobjs,
@@ -907,17 +907,17 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
 
                 if (mode_ecb) {
                         if (enc)
-                                rc = ll_crypto_blkcipher_encrypt(
+                                rc = crypto_blkcipher_encrypt(
                                         &desc, &dst, &src, src.length);
                         else
-                                rc = ll_crypto_blkcipher_decrypt(
+                                rc = crypto_blkcipher_decrypt(
                                         &desc, &dst, &src, src.length);
                 } else {
                         if (enc)
-                                rc = ll_crypto_blkcipher_encrypt_iv(
+                                rc = crypto_blkcipher_encrypt_iv(
                                         &desc, &dst, &src, src.length);
                         else
-                                rc = ll_crypto_blkcipher_decrypt_iv(
+                                rc = crypto_blkcipher_decrypt_iv(
                                         &desc, &dst, &src, src.length);
                 }
 
@@ -938,7 +938,7 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
  */
 static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                       struct krb5_header *khdr,
                       char *confounder,
                       struct ptlrpc_bulk_desc *desc,
@@ -953,7 +953,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         LASSERT(desc->bd_iov_count);
         LASSERT(desc->bd_enc_iov);
 
-        blocksize = ll_crypto_blkcipher_blocksize(tfm);
+        blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
         LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -965,7 +965,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         buf_to_sg(&src, confounder, blocksize);
         buf_to_sg(&dst, cipher->data, blocksize);
 
-        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to encrypt confounder: %d\n", rc);
                 return rc;
@@ -985,7 +985,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
                 desc->bd_enc_iov[i].kiov_offset = dst.offset;
                 desc->bd_enc_iov[i].kiov_len = dst.length;
 
-                rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
                 if (rc) {
                         CERROR("error to encrypt page: %d\n", rc);
@@ -997,8 +997,8 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         buf_to_sg(&src, khdr, sizeof(*khdr));
         buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
 
-        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
-                                            &dst, &src, sizeof(*khdr));
+        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+                                         sizeof(*khdr));
         if (rc) {
                 CERROR("error to encrypt krb5 header: %d\n", rc);
                 return rc;
@@ -1027,7 +1027,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
  * should have been done by prep_bulk().
  */
 static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                       struct krb5_header *khdr,
                       struct ptlrpc_bulk_desc *desc,
                       rawobj_t *cipher,
@@ -1044,7 +1044,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
         LASSERT(desc->bd_enc_iov);
         LASSERT(desc->bd_nob_transferred);
 
-        blocksize = ll_crypto_blkcipher_blocksize(tfm);
+        blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
         LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -1061,7 +1061,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
         buf_to_sg(&src, cipher->data, blocksize);
         buf_to_sg(&dst, plain->data, blocksize);
 
-        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to decrypt confounder: %d\n", rc);
                 return rc;
@@ -1104,8 +1104,8 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 if (desc->bd_iov[i].kiov_len % blocksize == 0)
                         sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
 
-                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
-                                                    src.length);
+                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+                                                 src.length);
                 if (rc) {
                         CERROR("error to decrypt page: %d\n", rc);
                         return rc;
@@ -1144,8 +1144,8 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
                 buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
 
-                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
-                                                    &dst, &src, sizeof(*khdr));
+                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+                                                 sizeof(*khdr));
                 if (rc) {
                         CERROR("error to decrypt tail: %d\n", rc);
                         return rc;
@@ -1179,7 +1179,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
         LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                 ke->ke_conf_size >=
-                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
 
         /*
          * final token format:
@@ -1203,7 +1203,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
         LASSERT(blocksize <= ke->ke_conf_size);
 
@@ -1248,23 +1248,23 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         cipher.len = token->len - sizeof(*khdr);
         LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
 
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                 arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+                rawobj_t                 arc4_keye;
+                struct crypto_blkcipher *arc4_tfm;
 
-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, rc = -EACCES);
-                }
+                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                        CERROR("failed to obtain arc4 enc key\n");
+                        GOTO(arc4_out, rc = -EACCES);
+                }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                 if (IS_ERR(arc4_tfm)) {
                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
                         GOTO(arc4_out_key, rc = -EACCES);
                 }
 
-                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                             arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
@@ -1274,7 +1274,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1, 3, data_desc,
                                           &cipher, 1);
 arc4_out_tfm:
-        ll_crypto_free_blkcipher(arc4_tfm);
+        crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
         rawobj_free(&arc4_keye);
 arc4_out:
@@ -1312,7 +1312,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
         LASSERT(desc->bd_enc_iov);
         LASSERT(kctx->kc_keye.kb_tfm);
 
-        blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 
         for (i = 0; i < desc->bd_iov_count; i++) {
                 LASSERT(desc->bd_enc_iov[i].kiov_page);
@@ -1373,7 +1373,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
 
         /*
@@ -1483,7 +1483,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
 
         /* expected token layout:
@@ -1521,26 +1521,26 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
         plain_out.data = tmpbuf;
         plain_out.len = bodysize;
 
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                 arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+                rawobj_t                 arc4_keye;
+                struct crypto_blkcipher *arc4_tfm;
 
-                cksum.data = token->data + token->len - ke->ke_hash_size;
-                cksum.len = ke->ke_hash_size;
+                cksum.data = token->data + token->len - ke->ke_hash_size;
+                cksum.len = ke->ke_hash_size;
 
-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, rc = -EACCES);
-                }
+                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                        CERROR("failed to obtain arc4 enc key\n");
+                        GOTO(arc4_out, rc = -EACCES);
+                }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                 if (IS_ERR(arc4_tfm)) {
                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
                         GOTO(arc4_out_key, rc = -EACCES);
                 }
 
-                if (ll_crypto_blkcipher_setkey(arc4_tfm,
+                if (crypto_blkcipher_setkey(arc4_tfm,
                                             arc4_keye.data, arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
@@ -1550,7 +1550,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1, 1, &cipher_in,
                                           &plain_out, 0);
 arc4_out_tfm:
-        ll_crypto_free_blkcipher(arc4_tfm);
+        crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
         rawobj_free(&arc4_keye);
 arc4_out:
@@ -1649,7 +1649,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                 LBUG();
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
         LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
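
With the shims gone, the call sites above use the kernel's legacy crypto_hash
interface directly. As a reading aid, the sketch below follows the same
allocate / setkey / digest / free pattern that capa_hmac() uses after this
patch; the helper name example_hmac, the "hmac(sha1)" algorithm string and the
buffer arguments are illustrative assumptions, not code from the tree.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hedged sketch of the crypto_hash usage pattern; not part of the patch. */
static int example_hmac(u8 *out, const void *buf, unsigned int len,
                        const u8 *key, unsigned int keylen)
{
        struct crypto_hash *tfm;
        struct hash_desc desc;
        struct scatterlist sl;
        int rc;

        /* allocate a keyed hash transform by name, e.g. "hmac(sha1)" */
        tfm = crypto_alloc_hash("hmac(sha1)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = 0;

        rc = crypto_hash_setkey(tfm, key, keylen);
        if (rc == 0) {
                sg_init_one(&sl, buf, len);
                /* one-shot digest; an init/update/final sequence also works,
                 * as krb5_digest_hmac() above shows */
                rc = crypto_hash_digest(&desc, &sl, len, out);
        }

        crypto_free_hash(tfm);
        return rc;
}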
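The block-cipher call sites (capa_encrypt_id(), keyblock_init(), krb5_encrypt())
follow the matching crypto_blkcipher pattern. A minimal sketch under the same
caveat; example_encrypt and the cbc(aes) transform name are illustrative, and
IV handling is simplified:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hedged sketch of the crypto_blkcipher usage pattern; not part of the patch. */
static int example_encrypt(void *dst, const void *src, unsigned int len,
                           const u8 *key, unsigned int keylen)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sgs, sgd;
        int rc;

        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_blkcipher_setkey(tfm, key, keylen);
        if (rc)
                goto out;

        sg_init_one(&sgs, src, len);
        sg_init_one(&sgd, dst, len);

        desc.tfm   = tfm;
        desc.info  = NULL;      /* only the *_encrypt_iv/_decrypt_iv calls read this */
        desc.flags = 0;

        /* len must be a multiple of crypto_blkcipher_blocksize(tfm) */
        rc = crypto_blkcipher_encrypt(&desc, &sgd, &sgs, len);
out:
        crypto_free_blkcipher(tfm);
        return rc;
}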