X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_krb5_mech.c;h=09895ba43487e9e2f2a31d5ffed9ba98dbd44a42;hb=671c1b0c705640d63a1d3be7016c79afd10bc8df;hp=7eb0c951dac4183834ec5452f3e271ee76f3e818;hpb=744f32ac9efb1e2f2837992703c5a5e35f261e60;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c index 7eb0c95..09895ba 100644 --- a/lustre/ptlrpc/gss/gss_krb5_mech.c +++ b/lustre/ptlrpc/gss/gss_krb5_mech.c @@ -1,9 +1,9 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * Modifications for Lustre * - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * + * Copyright (c) 2011, 2015, Intel Corporation. * * Author: Eric Mei */ @@ -48,20 +48,12 @@ * */ -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif #define DEBUG_SUBSYSTEM S_SEC -#ifdef __KERNEL__ #include #include #include #include -#include #include -#else -#include -#endif #include #include @@ -155,14 +147,14 @@ static const char * enctype2str(__u32 enctype) static int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode) { - kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0); - if (kb->kb_tfm == NULL) { - CERROR("failed to alloc tfm: %s, mode %d\n", - alg_name, alg_mode); - return -1; - } - - if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) { + kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0); + if (IS_ERR(kb->kb_tfm)) { + CERROR("failed to alloc tfm: %s, mode %d\n", + alg_name, alg_mode); + return -1; + } + + if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) { CERROR("failed to set %s key, len %d\n", alg_name, kb->kb_key.len); return -1; @@ -205,7 +197,7 @@ void keyblock_free(struct krb5_keyblock *kb) { rawobj_free(&kb->kb_key); if (kb->kb_tfm) - ll_crypto_free_blkcipher(kb->kb_tfm); + crypto_free_blkcipher(kb->kb_tfm); } static @@ -241,7 +233,7 @@ int get_rawobj(char **ptr, const char *end, rawobj_t *res) if (q > end || q < p) return -1; - OBD_ALLOC(res->data, len); + OBD_ALLOC_LARGE(res->data, len); if (!res->data) return -1; @@ -257,12 +249,12 @@ int get_keyblock(char **ptr, const char *end, { char *buf; - OBD_ALLOC(buf, keysize); + OBD_ALLOC_LARGE(buf, keysize); if (buf == NULL) return -1; if (get_bytes(ptr, end, buf, keysize)) { - OBD_FREE(buf, keysize); + OBD_FREE_LARGE(buf, keysize); return -1; } @@ -349,7 +341,7 @@ __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end) if (p != end) goto out_err; - CDEBUG(D_SEC, "succesfully imported rfc1964 context\n"); + CDEBUG(D_SEC, "successfully imported rfc1964 context\n"); return 0; out_err: return GSS_S_FAILURE; @@ -411,7 +403,7 @@ __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end) if (get_keyblock(&p, end, &kctx->kc_keyc, keysize)) goto out_err; - CDEBUG(D_SEC, "succesfully imported v2 context\n"); + CDEBUG(D_SEC, "successfully imported v2 context\n"); return 0; out_err: return GSS_S_FAILURE; @@ -502,7 +494,7 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx, goto out_err; gctx_new->internal_ctx_id = knew; - CDEBUG(D_SEC, "succesfully copied reverse context\n"); + CDEBUG(D_SEC, "successfully copied reverse context\n"); return GSS_S_COMPLETE; out_err: @@ -533,13 +525,12 @@ void gss_delete_sec_context_kerberos(void *internal_ctx) static void buf_to_sg(struct scatterlist *sg, void *ptr, int len) { - sg->page = 
virt_to_page(ptr); - sg->offset = offset_in_page(ptr); - sg->length = len; + sg_init_table(sg, 1); + sg_set_buf(sg, ptr, len); } static -__u32 krb5_encrypt(struct ll_crypto_cipher *tfm, +__u32 krb5_encrypt(struct crypto_blkcipher *tfm, int decrypt, void * iv, void * in, @@ -556,36 +547,34 @@ __u32 krb5_encrypt(struct ll_crypto_cipher *tfm, desc.info = local_iv; desc.flags= 0; - if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) { + if (length % crypto_blkcipher_blocksize(tfm) != 0) { CERROR("output length %d mismatch blocksize %d\n", - length, ll_crypto_blkcipher_blocksize(tfm)); + length, crypto_blkcipher_blocksize(tfm)); goto out; } - if (ll_crypto_blkcipher_ivsize(tfm) > 16) { - CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm)); + if (crypto_blkcipher_ivsize(tfm) > 16) { + CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm)); goto out; } if (iv) - memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm)); + memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); memcpy(out, in, length); buf_to_sg(&sg, out, length); if (decrypt) - ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length); + ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length); else - ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length); + ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length); out: return(ret); } -#ifdef HAVE_ASYNC_BLOCK_CIPHER - static inline -int krb5_digest_hmac(struct ll_crypto_hash *tfm, +int krb5_digest_hmac(struct crypto_hash *tfm, rawobj_t *key, struct krb5_header *khdr, int msgcnt, rawobj_t *msgs, @@ -596,80 +585,39 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm, struct scatterlist sg[1]; int i; - ll_crypto_hash_setkey(tfm, key->data, key->len); + crypto_hash_setkey(tfm, key->data, key->len); desc.tfm = tfm; desc.flags= 0; - ll_crypto_hash_init(&desc); + crypto_hash_init(&desc); for (i = 0; i < msgcnt; i++) { if (msgs[i].len == 0) continue; buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len); - ll_crypto_hash_update(&desc, sg, msgs[i].len); + crypto_hash_update(&desc, sg, msgs[i].len); } for (i = 0; i < iovcnt; i++) { if (iovs[i].kiov_len == 0) continue; - sg[0].page = iovs[i].kiov_page; - sg[0].offset = iovs[i].kiov_offset; - sg[0].length = iovs[i].kiov_len; - ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len); - } - - if (khdr) { - buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); - ll_crypto_hash_update(&desc, sg, sizeof(*khdr)); - } - - return ll_crypto_hash_final(&desc, cksum->data); -} - -#else /* ! 
HAVE_ASYNC_BLOCK_CIPHER */ - -static inline -int krb5_digest_hmac(struct ll_crypto_hash *tfm, - rawobj_t *key, - struct krb5_header *khdr, - int msgcnt, rawobj_t *msgs, - int iovcnt, lnet_kiov_t *iovs, - rawobj_t *cksum) -{ - struct scatterlist sg[1]; - __u32 keylen = key->len, i; - - crypto_hmac_init(tfm, key->data, &keylen); - - for (i = 0; i < msgcnt; i++) { - if (msgs[i].len == 0) - continue; - buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len); - crypto_hmac_update(tfm, sg, 1); - } - for (i = 0; i < iovcnt; i++) { - if (iovs[i].kiov_len == 0) - continue; - sg[0].page = iovs[i].kiov_page; - sg[0].offset = iovs[i].kiov_offset; - sg[0].length = iovs[i].kiov_len; - crypto_hmac_update(tfm, sg, 1); + sg_init_table(sg, 1); + sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len, + iovs[i].kiov_offset); + crypto_hash_update(&desc, sg, iovs[i].kiov_len); } if (khdr) { buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); - crypto_hmac_update(tfm, sg, 1); + crypto_hash_update(&desc, sg, sizeof(*khdr)); } - crypto_hmac_final(tfm, key->data, &keylen, cksum->data); - return 0; + return crypto_hash_final(&desc, cksum->data); } -#endif /* HAVE_ASYNC_BLOCK_CIPHER */ - static inline -int krb5_digest_norm(struct ll_crypto_hash *tfm, +int krb5_digest_norm(struct crypto_hash *tfm, struct krb5_keyblock *kb, struct krb5_header *khdr, int msgcnt, rawobj_t *msgs, @@ -684,30 +632,31 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm, desc.tfm = tfm; desc.flags= 0; - ll_crypto_hash_init(&desc); + crypto_hash_init(&desc); for (i = 0; i < msgcnt; i++) { if (msgs[i].len == 0) continue; buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len); - ll_crypto_hash_update(&desc, sg, msgs[i].len); + crypto_hash_update(&desc, sg, msgs[i].len); } for (i = 0; i < iovcnt; i++) { if (iovs[i].kiov_len == 0) continue; - sg[0].page = iovs[i].kiov_page; - sg[0].offset = iovs[i].kiov_offset; - sg[0].length = iovs[i].kiov_len; - ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len); + + sg_init_table(sg, 1); + sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len, + iovs[i].kiov_offset); + crypto_hash_update(&desc, sg, iovs[i].kiov_len); } if (khdr) { buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); - ll_crypto_hash_update(&desc, sg, sizeof(*khdr)); + crypto_hash_update(&desc, sg, sizeof(*khdr)); } - ll_crypto_hash_final(&desc, cksum->data); + crypto_hash_final(&desc, cksum->data); return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data, cksum->data, cksum->len); @@ -726,17 +675,17 @@ __s32 krb5_make_checksum(__u32 enctype, rawobj_t *cksum) { struct krb5_enctype *ke = &enctypes[enctype]; - struct ll_crypto_hash *tfm; + struct crypto_hash *tfm; __u32 code = GSS_S_FAILURE; int rc; - if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) { + if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) { CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name); return GSS_S_FAILURE; } - cksum->len = ll_crypto_hash_digestsize(tfm); - OBD_ALLOC(cksum->data, cksum->len); + cksum->len = crypto_hash_digestsize(tfm); + OBD_ALLOC_LARGE(cksum->data, cksum->len); if (!cksum->data) { cksum->len = 0; goto out_tfm; @@ -752,7 +701,7 @@ __s32 krb5_make_checksum(__u32 enctype, if (rc == 0) code = GSS_S_COMPLETE; out_tfm: - ll_crypto_free_hash(tfm); + crypto_free_hash(tfm); return code; } @@ -777,9 +726,9 @@ static void fill_krb5_header(struct krb5_ctx *kctx, } khdr->kh_filler = 0xff; - spin_lock(&krb5_seq_lock); - khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); - spin_unlock(&krb5_seq_lock); + spin_lock(&krb5_seq_lock); + khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); + 
spin_unlock(&krb5_seq_lock); } static __u32 verify_krb5_header(struct krb5_ctx *kctx, @@ -930,7 +879,7 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize) } static -int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm, +int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm, int mode_ecb, int inobj_cnt, rawobj_t *inobjs, @@ -957,17 +906,17 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm, if (mode_ecb) { if (enc) - rc = ll_crypto_blkcipher_encrypt( + rc = crypto_blkcipher_encrypt( &desc, &dst, &src, src.length); else - rc = ll_crypto_blkcipher_decrypt( + rc = crypto_blkcipher_decrypt( &desc, &dst, &src, src.length); } else { if (enc) - rc = ll_crypto_blkcipher_encrypt_iv( + rc = crypto_blkcipher_encrypt_iv( &desc, &dst, &src, src.length); else - rc = ll_crypto_blkcipher_decrypt_iv( + rc = crypto_blkcipher_decrypt_iv( &desc, &dst, &src, src.length); } @@ -984,8 +933,11 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm, RETURN(0); } +/* + * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size. + */ static -int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm, +int krb5_encrypt_bulk(struct crypto_blkcipher *tfm, struct krb5_header *khdr, char *confounder, struct ptlrpc_bulk_desc *desc, @@ -997,10 +949,11 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm, struct scatterlist src, dst; int blocksize, i, rc, nob = 0; + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); LASSERT(desc->bd_iov_count); - LASSERT(desc->bd_enc_iov); + LASSERT(GET_ENC_KIOV(desc)); - blocksize = ll_crypto_blkcipher_blocksize(tfm); + blocksize = crypto_blkcipher_blocksize(tfm); LASSERT(blocksize > 1); LASSERT(cipher->len == blocksize + sizeof(*khdr)); @@ -1012,7 +965,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm, buf_to_sg(&src, confounder, blocksize); buf_to_sg(&dst, cipher->data, blocksize); - rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize); + rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize); if (rc) { CERROR("error to encrypt confounder: %d\n", rc); return rc; @@ -1020,22 +973,22 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm, /* encrypt clear pages */ for (i = 0; i < desc->bd_iov_count; i++) { - src.page = desc->bd_iov[i].kiov_page; - src.offset = desc->bd_iov[i].kiov_offset; - src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) & - (~(blocksize - 1)); - - if (adj_nob) - nob += src.length; - - dst.page = desc->bd_enc_iov[i].kiov_page; - dst.offset = src.offset; - dst.length = src.length; - - desc->bd_enc_iov[i].kiov_offset = dst.offset; - desc->bd_enc_iov[i].kiov_len = dst.length; - - rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, + sg_init_table(&src, 1); + sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page, + (BD_GET_KIOV(desc, i).kiov_len + + blocksize - 1) & + (~(blocksize - 1)), + BD_GET_KIOV(desc, i).kiov_offset); + if (adj_nob) + nob += src.length; + sg_init_table(&dst, 1); + sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page, + src.length, src.offset); + + BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset; + BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length; + + rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, src.length); if (rc) { CERROR("error to encrypt page: %d\n", rc); @@ -1047,8 +1000,8 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm, buf_to_sg(&src, khdr, sizeof(*khdr)); buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr)); - rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, - &dst, &src, sizeof(*khdr)); + rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, + 
sizeof(*khdr)); if (rc) { CERROR("error to encrypt krb5 header: %d\n", rc); return rc; @@ -1063,13 +1016,28 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm, /* * desc->bd_nob_transferred is the size of cipher text received. * desc->bd_nob is the target size of plain text supposed to be. + * + * if adj_nob != 0, we adjust each page's kiov_len to the actual + * plain text size. + * - for client read: we don't know data size for each page, so + * bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might + * be smaller, so we need to adjust it according to + * bd_u.bd_kiov.bd_enc_vec[]->kiov_len. + * this means we DO NOT support the situation that server send an odd size + * data in a page which is not the last one. + * - for server write: we knows exactly data size for each page being expected, + * thus kiov_len is accurate already, so we should not adjust it at all. + * and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be + * round_up(bd_iov[]->kiov_len) which + * should have been done by prep_bulk(). */ static -int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm, +int krb5_decrypt_bulk(struct crypto_blkcipher *tfm, struct krb5_header *khdr, struct ptlrpc_bulk_desc *desc, rawobj_t *cipher, - rawobj_t *plain) + rawobj_t *plain, + int adj_nob) { struct blkcipher_desc ciph_desc; __u8 local_iv[16] = {0}; @@ -1077,11 +1045,12 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm, int ct_nob = 0, pt_nob = 0; int blocksize, i, rc; + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); LASSERT(desc->bd_iov_count); - LASSERT(desc->bd_enc_iov); + LASSERT(GET_ENC_KIOV(desc)); LASSERT(desc->bd_nob_transferred); - blocksize = ll_crypto_blkcipher_blocksize(tfm); + blocksize = crypto_blkcipher_blocksize(tfm); LASSERT(blocksize > 1); LASSERT(cipher->len == blocksize + sizeof(*khdr)); @@ -1098,68 +1067,101 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm, buf_to_sg(&src, cipher->data, blocksize); buf_to_sg(&dst, plain->data, blocksize); - rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize); + rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize); if (rc) { CERROR("error to decrypt confounder: %d\n", rc); return rc; } - /* - * decrypt clear pages. note the enc_iov is prepared by prep_bulk() - * which already done some sanity checkings. - * - * desc->bd_nob is the actual plain text size supposed to be - * transferred. desc->bd_nob_transferred is the actual cipher - * text received. 
- */ - for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred; - i++) { - if (desc->bd_enc_iov[i].kiov_len == 0) - continue; - - if (ct_nob + desc->bd_enc_iov[i].kiov_len > - desc->bd_nob_transferred) - desc->bd_enc_iov[i].kiov_len = - desc->bd_nob_transferred - ct_nob; - - desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len; - if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob) - desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob; - - src.page = desc->bd_enc_iov[i].kiov_page; - src.offset = desc->bd_enc_iov[i].kiov_offset; - src.length = desc->bd_enc_iov[i].kiov_len; - - dst = src; - - if (desc->bd_iov[i].kiov_offset % blocksize == 0) - dst.page = desc->bd_iov[i].kiov_page; - - rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, - src.length); + for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred; + i++) { + if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize + != 0 || + BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize + != 0) { + CERROR("page %d: odd offset %u len %u, blocksize %d\n", + i, BD_GET_ENC_KIOV(desc, i).kiov_offset, + BD_GET_ENC_KIOV(desc, i).kiov_len, + blocksize); + return -EFAULT; + } + + if (adj_nob) { + if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len > + desc->bd_nob_transferred) + BD_GET_ENC_KIOV(desc, i).kiov_len = + desc->bd_nob_transferred - ct_nob; + + BD_GET_KIOV(desc, i).kiov_len = + BD_GET_ENC_KIOV(desc, i).kiov_len; + if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len > + desc->bd_nob) + BD_GET_KIOV(desc, i).kiov_len = + desc->bd_nob - pt_nob; + } else { + /* this should be guaranteed by LNET */ + LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i). + kiov_len <= + desc->bd_nob_transferred); + LASSERT(BD_GET_KIOV(desc, i).kiov_len <= + BD_GET_ENC_KIOV(desc, i).kiov_len); + } + + if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0) + continue; + + sg_init_table(&src, 1); + sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page, + BD_GET_ENC_KIOV(desc, i).kiov_len, + BD_GET_ENC_KIOV(desc, i).kiov_offset); + dst = src; + if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0) + sg_assign_page(&dst, + BD_GET_KIOV(desc, i).kiov_page); + + rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, + src.length); if (rc) { CERROR("error to decrypt page: %d\n", rc); return rc; } - if (desc->bd_iov[i].kiov_offset % blocksize) { - memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) + - desc->bd_iov[i].kiov_offset, - cfs_page_address(desc->bd_enc_iov[i].kiov_page) + - desc->bd_iov[i].kiov_offset, - desc->bd_iov[i].kiov_len); - } + if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) { + memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) + + BD_GET_KIOV(desc, i).kiov_offset, + page_address(BD_GET_ENC_KIOV(desc, i). 
+ kiov_page) + + BD_GET_KIOV(desc, i).kiov_offset, + BD_GET_KIOV(desc, i).kiov_len); + } + + ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len; + pt_nob += BD_GET_KIOV(desc, i).kiov_len; + } + + if (unlikely(ct_nob != desc->bd_nob_transferred)) { + CERROR("%d cipher text transferred but only %d decrypted\n", + desc->bd_nob_transferred, ct_nob); + return -EFAULT; + } - ct_nob += desc->bd_enc_iov[i].kiov_len; - pt_nob += desc->bd_iov[i].kiov_len; + if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) { + CERROR("%d plain text expected but only %d received\n", + desc->bd_nob, pt_nob); + return -EFAULT; } + /* if needed, clear up the rest unused iovs */ + if (adj_nob) + while (i < desc->bd_iov_count) + BD_GET_KIOV(desc, i++).kiov_len = 0; + /* decrypt tail (krb5 header) */ buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr)); buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr)); - rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, - &dst, &src, sizeof(*khdr)); + rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, + sizeof(*khdr)); if (rc) { CERROR("error to decrypt tail: %d\n", rc); return rc; @@ -1193,7 +1195,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); LASSERT(kctx->kc_keye.kb_tfm == NULL || ke->ke_conf_size >= - ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm)); + crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm)); /* * final token format: @@ -1208,7 +1210,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, fill_krb5_header(kctx, khdr, 1); /* generate confounder */ - get_random_bytes(conf, ke->ke_conf_size); + cfs_get_random_bytes(conf, ke->ke_conf_size); /* get encryption blocksize. note kc_keye might not associated with * a tfm, currently only for arcfour-hmac */ @@ -1217,7 +1219,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, blocksize = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); } LASSERT(blocksize <= ke->ke_conf_size); @@ -1262,23 +1264,23 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, cipher.len = token->len - sizeof(*khdr); LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr)); - if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - rawobj_t arc4_keye; - struct ll_crypto_cipher *arc4_tfm; + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + rawobj_t arc4_keye; + struct crypto_blkcipher *arc4_tfm; - if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, - NULL, 1, &cksum, 0, NULL, &arc4_keye)) { - CERROR("failed to obtain arc4 enc key\n"); - GOTO(arc4_out, rc = -EACCES); - } + if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, + NULL, 1, &cksum, 0, NULL, &arc4_keye)) { + CERROR("failed to obtain arc4 enc key\n"); + GOTO(arc4_out, rc = -EACCES); + } - arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0); - if (arc4_tfm == NULL) { - CERROR("failed to alloc tfm arc4 in ECB mode\n"); - GOTO(arc4_out_key, rc = -EACCES); - } + arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0); + if (IS_ERR(arc4_tfm)) { + CERROR("failed to alloc tfm arc4 in ECB mode\n"); + GOTO(arc4_out_key, rc = -EACCES); + } - if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data, + if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data, arc4_keye.len)) { CERROR("failed to set arc4 key, len %d\n", arc4_keye.len); @@ -1288,7 +1290,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, rc = krb5_encrypt_rawobjs(arc4_tfm, 1, 3, data_desc, &cipher, 1); arc4_out_tfm: - ll_crypto_free_blkcipher(arc4_tfm); + 
crypto_free_blkcipher(arc4_tfm); arc4_out_key: rawobj_free(&arc4_keye); arc4_out: @@ -1317,35 +1319,38 @@ arc4_out: static __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx, - struct ptlrpc_bulk_desc *desc) + struct ptlrpc_bulk_desc *desc) { - struct krb5_ctx *kctx = gctx->internal_ctx_id; - int blocksize, i; - - LASSERT(desc->bd_iov_count); - LASSERT(desc->bd_enc_iov); - LASSERT(kctx->kc_keye.kb_tfm); - - blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); - - for (i = 0; i < desc->bd_iov_count; i++) { - LASSERT(desc->bd_enc_iov[i].kiov_page); - /* - * offset should always start at page boundary of either - * client or server side. - */ - if (desc->bd_iov[i].kiov_offset & blocksize) { - CERROR("odd offset %d in page %d\n", - desc->bd_iov[i].kiov_offset, i); - return GSS_S_FAILURE; - } - - desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset; - desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len + - blocksize - 1) & (~(blocksize - 1)); - } - - return GSS_S_COMPLETE; + struct krb5_ctx *kctx = gctx->internal_ctx_id; + int blocksize, i; + + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + LASSERT(desc->bd_iov_count); + LASSERT(GET_ENC_KIOV(desc)); + LASSERT(kctx->kc_keye.kb_tfm); + + blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + + for (i = 0; i < desc->bd_iov_count; i++) { + LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page); + /* + * offset should always start at page boundary of either + * client or server side. + */ + if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) { + CERROR("odd offset %d in page %d\n", + BD_GET_KIOV(desc, i).kiov_offset, i); + return GSS_S_FAILURE; + } + + BD_GET_ENC_KIOV(desc, i).kiov_offset = + BD_GET_KIOV(desc, i).kiov_offset; + BD_GET_ENC_KIOV(desc, i).kiov_len = + (BD_GET_KIOV(desc, i).kiov_len + + blocksize - 1) & (~(blocksize - 1)); + } + + return GSS_S_COMPLETE; } static @@ -1362,6 +1367,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, __u8 conf[GSS_MAX_CIPHER_BLOCK]; int rc = 0; + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); LASSERT(ke); LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); @@ -1378,7 +1384,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, fill_krb5_header(kctx, khdr, 1); /* generate confounder */ - get_random_bytes(conf, ke->ke_conf_size); + cfs_get_random_bytes(conf, ke->ke_conf_size); /* get encryption blocksize. 
note kc_keye might not associated with * a tfm, currently only for arcfour-hmac */ @@ -1387,7 +1393,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, blocksize = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); } /* @@ -1408,13 +1414,13 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, data_desc[0].data = conf; data_desc[0].len = ke->ke_conf_size; - /* compute checksum */ - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, - khdr, 1, data_desc, - desc->bd_iov_count, desc->bd_iov, - &cksum)) - return GSS_S_FAILURE; - LASSERT(cksum.len >= ke->ke_hash_size); + /* compute checksum */ + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 1, data_desc, + desc->bd_iov_count, GET_KIOV(desc), + &cksum)) + return GSS_S_FAILURE; + LASSERT(cksum.len >= ke->ke_hash_size); /* * clear text layout for encryption: @@ -1497,7 +1503,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, blocksize = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); } /* expected token layout: @@ -1524,7 +1530,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, } /* decrypting */ - OBD_ALLOC(tmpbuf, bodysize); + OBD_ALLOC_LARGE(tmpbuf, bodysize); if (!tmpbuf) return GSS_S_FAILURE; @@ -1535,26 +1541,26 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, plain_out.data = tmpbuf; plain_out.len = bodysize; - if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - rawobj_t arc4_keye; - struct ll_crypto_cipher *arc4_tfm; + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + rawobj_t arc4_keye; + struct crypto_blkcipher *arc4_tfm; - cksum.data = token->data + token->len - ke->ke_hash_size; - cksum.len = ke->ke_hash_size; + cksum.data = token->data + token->len - ke->ke_hash_size; + cksum.len = ke->ke_hash_size; - if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, - NULL, 1, &cksum, 0, NULL, &arc4_keye)) { - CERROR("failed to obtain arc4 enc key\n"); - GOTO(arc4_out, rc = -EACCES); - } + if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, + NULL, 1, &cksum, 0, NULL, &arc4_keye)) { + CERROR("failed to obtain arc4 enc key\n"); + GOTO(arc4_out, rc = -EACCES); + } - arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0); - if (arc4_tfm == NULL) { - CERROR("failed to alloc tfm arc4 in ECB mode\n"); - GOTO(arc4_out_key, rc = -EACCES); - } + arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0); + if (IS_ERR(arc4_tfm)) { + CERROR("failed to alloc tfm arc4 in ECB mode\n"); + GOTO(arc4_out_key, rc = -EACCES); + } - if (ll_crypto_blkcipher_setkey(arc4_tfm, + if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data, arc4_keye.len)) { CERROR("failed to set arc4 key, len %d\n", arc4_keye.len); @@ -1564,7 +1570,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, rc = krb5_encrypt_rawobjs(arc4_tfm, 1, 1, &cipher_in, &plain_out, 0); arc4_out_tfm: - ll_crypto_free_blkcipher(arc4_tfm); + crypto_free_blkcipher(arc4_tfm); arc4_out_key: rawobj_free(&arc4_keye); arc4_out: @@ -1621,7 +1627,7 @@ arc4_out: major = GSS_S_COMPLETE; out_free: - OBD_FREE(tmpbuf, bodysize); + OBD_FREE_LARGE(tmpbuf, bodysize); rawobj_free(&cksum); return major; } @@ -1629,7 +1635,7 @@ out_free: static __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, struct ptlrpc_bulk_desc *desc, - rawobj_t *token) + rawobj_t *token, int adj_nob) { struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype 
*ke = &enctypes[kctx->kc_enctype]; @@ -1641,6 +1647,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, int rc; __u32 major; + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); LASSERT(ke); if (token->len < sizeof(*khdr)) { @@ -1663,7 +1670,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, LBUG(); } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); } LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0); @@ -1685,7 +1692,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, plain.len = cipher.len; rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr, - desc, &cipher, &plain); + desc, &cipher, &plain, adj_nob); if (rc) return GSS_S_DEFECTIVE_TOKEN; @@ -1698,12 +1705,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, data_desc[0].data = plain.data; data_desc[0].len = blocksize; - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, - khdr, 1, data_desc, - desc->bd_iov_count, desc->bd_iov, - &cksum)) - return GSS_S_FAILURE; - LASSERT(cksum.len >= ke->ke_hash_size); + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 1, data_desc, + desc->bd_iov_count, + GET_KIOV(desc), + &cksum)) + return GSS_S_FAILURE; + LASSERT(cksum.len >= ke->ke_hash_size); if (memcmp(plain.data + blocksize + sizeof(*khdr), cksum.data + cksum.len - ke->ke_hash_size, @@ -1786,17 +1794,17 @@ static struct gss_api_mech gss_kerberos_mech = { int __init init_kerberos_module(void) { - int status; + int status; - spin_lock_init(&krb5_seq_lock); + spin_lock_init(&krb5_seq_lock); - status = lgss_mech_register(&gss_kerberos_mech); - if (status) - CERROR("Failed to register kerberos gss mechanism!\n"); - return status; + status = lgss_mech_register(&gss_kerberos_mech); + if (status) + CERROR("Failed to register kerberos gss mechanism!\n"); + return status; } -void __exit cleanup_kerberos_module(void) +void cleanup_kerberos_module(void) { lgss_mech_unregister(&gss_kerberos_mech); }
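
The two recurring mechanical changes in this patch are (1) switching from the ll_crypto_* wrappers to the native kernel crypto API, where crypto_alloc_blkcipher() reports failure with an ERR_PTR() rather than NULL, and (2) replacing direct writes to sg->page/sg->offset/sg->length with sg_init_table() plus sg_set_buf()/sg_set_page(). A minimal sketch of both patterns follows, targeting the same legacy (pre-4.20) blkcipher API the patch uses; the demo_* names are illustrative and not part of the Lustre tree.

/* Illustrative sketch only: mirrors the allocation and scatterlist patterns
 * this patch migrates to, against the legacy kernel blkcipher API.
 */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/errno.h>

/* crypto_alloc_blkcipher() returns ERR_PTR() codes on failure, so the old
 * "== NULL" test never fires; check with IS_ERR(), as keyblock_init() and
 * the arc4 paths now do. */
static int demo_cipher_init(struct crypto_blkcipher **tfm, const char *alg,
			    const u8 *key, unsigned int keylen)
{
	*tfm = crypto_alloc_blkcipher(alg, 0, 0);
	if (IS_ERR(*tfm))
		return PTR_ERR(*tfm);

	if (crypto_blkcipher_setkey(*tfm, key, keylen)) {
		crypto_free_blkcipher(*tfm);
		*tfm = NULL;
		return -EINVAL;
	}
	return 0;
}

/* buf_to_sg() after the patch: go through the scatterlist helpers instead of
 * assigning sg->page directly, which broke once the page pointer became the
 * encoded page_link field used for sg chaining. */
static void demo_buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, ptr, len);
}

/* Round a bulk fragment up to the cipher block size, as krb5_encrypt_bulk()
 * and gss_prep_bulk_kerberos() do for the encrypted kiov lengths; valid only
 * for power-of-two block sizes (e.g. 16 for AES-CBC). */
static unsigned int demo_round_up_block(unsigned int len, unsigned int blocksize)
{
	return (len + blocksize - 1) & ~(blocksize - 1);
}

In the patch itself the key material comes from kb->kb_key, and the round-up is what keeps each encrypted kiov length a whole number of cipher blocks, so the peer's CBC decryption never sees a partial block.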
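
The other substantive change is the adj_nob handling threaded through krb5_decrypt_bulk() and gss_unwrap_bulk_kerberos(): on a client read the per-page plain-text sizes are not known in advance, so the received cipher-text lengths drive how much plain text each page gets, while on a server write the lengths are already exact and are only verified. Below is a condensed, self-contained model of that bookkeeping, using plain unsigned length arrays in place of the kiov_len fields of bd_enc_vec[] and bd_iov[]; it sketches the length adjustment only, with the actual decryption and error paths omitted.

/* Model of the adj_nob length adjustment in krb5_decrypt_bulk():
 * trim the cipher-text capacities so they sum to the bytes actually
 * transferred, and derive plain-text lengths capped at the expected total.
 * enc_len[]/plain_len[] stand in for bd_enc_vec[i].kiov_len and
 * bd_iov[i].kiov_len; this is illustrative, not Lustre code. */
#include <stddef.h>

static void demo_adjust_bulk_lengths(unsigned int *enc_len,
				     unsigned int *plain_len,
				     size_t npages,
				     unsigned int nob_transferred,
				     unsigned int nob_expected)
{
	unsigned int ct_nob = 0, pt_nob = 0;
	size_t i;

	for (i = 0; i < npages && ct_nob < nob_transferred; i++) {
		/* the last cipher-text fragment may be short */
		if (ct_nob + enc_len[i] > nob_transferred)
			enc_len[i] = nob_transferred - ct_nob;

		/* plain text is at most the cipher text, capped overall */
		plain_len[i] = enc_len[i];
		if (pt_nob + plain_len[i] > nob_expected)
			plain_len[i] = nob_expected - pt_nob;

		ct_nob += enc_len[i];
		pt_nob += plain_len[i];
	}

	/* remaining pages carry no data */
	for (; i < npages; i++)
		plain_len[i] = 0;
}

This mirrors why the comment above krb5_decrypt_bulk() notes that an odd-sized fragment is only tolerated in the last page of a client read: any shortfall is absorbed by truncating the tail, never by resynchronizing in the middle of the transfer.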