X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_krb5_mech.c;h=5bd9f08e54b84dd9b5409802dfd52246eeb29c20;hb=495778754c1e8935ecd9c308e23f9797165863c5;hp=b0f9292dcae13bb130c40a6c4cf0edfac1b2b3b2;hpb=d2d56f38da01001c92a09afc6b52b5acbd9bc13c;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c index b0f9292..5bd9f08 100644 --- a/lustre/ptlrpc/gss/gss_krb5_mech.c +++ b/lustre/ptlrpc/gss/gss_krb5_mech.c @@ -2,8 +2,9 @@ * vim:expandtab:shiftwidth=8:tabstop=8: * * Modifications for Lustre - * Copyright 2004 - 2006, Cluster File Systems, Inc. - * All rights reserved + * + * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * * Author: Eric Mei */ @@ -57,6 +58,7 @@ #include #include #include +#include #else #include #endif @@ -75,7 +77,7 @@ #include "gss_asn1.h" #include "gss_krb5.h" -spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t krb5_seq_lock; struct krb5_enctype { char *ke_dispname; @@ -95,45 +97,45 @@ struct krb5_enctype { static struct krb5_enctype enctypes[] = { [ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */ "des-cbc-md5", - "des", + "cbc(des)", "md5", - CRYPTO_TFM_MODE_CBC, + 0, 16, 8, 0, }, [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */ - "des-hmac-sha1", - "des3_ede", - "sha1", - CRYPTO_TFM_MODE_CBC, + "des3-hmac-sha1", + "cbc(des3_ede)", + "hmac(sha1)", + 0, 20, 8, 1, }, [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */ "aes128-cts-hmac-sha1-96", - "aes", - "sha1", - CRYPTO_TFM_MODE_CBC, + "cbc(aes)", + "hmac(sha1)", + 0, 12, 16, 1, }, [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */ "aes256-cts-hmac-sha1-96", - "aes", - "sha1", - CRYPTO_TFM_MODE_CBC, + "cbc(aes)", + "hmac(sha1)", + 0, 12, 16, 1, }, [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */ "arcfour-hmac-md5", - "arc4", - "md5", - CRYPTO_TFM_MODE_ECB, + "ecb(arc4)", + "hmac(md5)", + 0, 16, 8, 1, @@ -153,14 +155,14 @@ static const char * enctype2str(__u32 enctype) static int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode) { - kb->kb_tfm = crypto_alloc_tfm(alg_name, alg_mode); + kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0); if (kb->kb_tfm == NULL) { CERROR("failed to alloc tfm: %s, mode %d\n", alg_name, alg_mode); return -1; } - if (crypto_cipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) { + if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) { CERROR("failed to set %s key, len %d\n", alg_name, kb->kb_key.len); return -1; @@ -203,7 +205,7 @@ void keyblock_free(struct krb5_keyblock *kb) { rawobj_free(&kb->kb_key); if (kb->kb_tfm) - crypto_free_tfm(kb->kb_tfm); + ll_crypto_free_blkcipher(kb->kb_tfm); } static @@ -480,12 +482,8 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx, knew->kc_cfx = kctx->kc_cfx; knew->kc_seed_init = kctx->kc_seed_init; knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey; -#if 0 knew->kc_endtime = kctx->kc_endtime; -#else - /* FIXME reverse context don't expire for now */ - knew->kc_endtime = INT_MAX; -#endif + memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed)); knew->kc_seq_send = kctx->kc_seq_recv; knew->kc_seq_recv = kctx->kc_seq_send; @@ -533,7 +531,7 @@ void gss_delete_sec_context_kerberos(void *internal_ctx) } static -void buf_to_sg(struct scatterlist *sg, char *ptr, int len) +void buf_to_sg(struct scatterlist *sg, void *ptr, int len) { sg->page = virt_to_page(ptr); sg->offset = offset_in_page(ptr); @@ -541,50 +539,101 @@ void buf_to_sg(struct scatterlist *sg, char *ptr, 
int len) } static -__u32 krb5_encrypt(struct crypto_tfm *tfm, +__u32 krb5_encrypt(struct ll_crypto_cipher *tfm, int decrypt, void * iv, void * in, void * out, int length) { - struct scatterlist sg; + struct blkcipher_desc desc; + struct scatterlist sg; __u8 local_iv[16] = {0}; __u32 ret = -EINVAL; LASSERT(tfm); + desc.tfm = tfm; + desc.info = local_iv; + desc.flags= 0; - if (length % crypto_tfm_alg_blocksize(tfm) != 0) { + if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) { CERROR("output length %d mismatch blocksize %d\n", - length, crypto_tfm_alg_blocksize(tfm)); + length, ll_crypto_blkcipher_blocksize(tfm)); goto out; } - if (crypto_tfm_alg_ivsize(tfm) > 16) { - CERROR("iv size too large %d\n", crypto_tfm_alg_ivsize(tfm)); + if (ll_crypto_blkcipher_ivsize(tfm) > 16) { + CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm)); goto out; } if (iv) - memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm)); + memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm)); memcpy(out, in, length); buf_to_sg(&sg, out, length); if (decrypt) - ret = crypto_cipher_decrypt_iv(tfm, &sg, &sg, length, local_iv); + ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length); else - ret = crypto_cipher_encrypt_iv(tfm, &sg, &sg, length, local_iv); + ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length); out: return(ret); } +#ifdef HAVE_ASYNC_BLOCK_CIPHER + static inline -int krb5_digest_hmac(struct crypto_tfm *tfm, +int krb5_digest_hmac(struct ll_crypto_hash *tfm, rawobj_t *key, struct krb5_header *khdr, int msgcnt, rawobj_t *msgs, + int iovcnt, lnet_kiov_t *iovs, + rawobj_t *cksum) +{ + struct hash_desc desc; + struct scatterlist sg[1]; + int i; + + ll_crypto_hash_setkey(tfm, key->data, key->len); + desc.tfm = tfm; + desc.flags= 0; + + ll_crypto_hash_init(&desc); + + for (i = 0; i < msgcnt; i++) { + if (msgs[i].len == 0) + continue; + buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len); + ll_crypto_hash_update(&desc, sg, msgs[i].len); + } + + for (i = 0; i < iovcnt; i++) { + if (iovs[i].kiov_len == 0) + continue; + sg[0].page = iovs[i].kiov_page; + sg[0].offset = iovs[i].kiov_offset; + sg[0].length = iovs[i].kiov_len; + ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len); + } + + if (khdr) { + buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); + ll_crypto_hash_update(&desc, sg, sizeof(*khdr)); + } + + return ll_crypto_hash_final(&desc, cksum->data); +} + +#else /* ! 
HAVE_ASYNC_BLOCK_CIPHER */ + +static inline +int krb5_digest_hmac(struct ll_crypto_hash *tfm, + rawobj_t *key, + struct krb5_header *khdr, + int msgcnt, rawobj_t *msgs, + int iovcnt, lnet_kiov_t *iovs, rawobj_t *cksum) { struct scatterlist sg[1]; @@ -599,6 +648,15 @@ int krb5_digest_hmac(struct crypto_tfm *tfm, crypto_hmac_update(tfm, sg, 1); } + for (i = 0; i < iovcnt; i++) { + if (iovs[i].kiov_len == 0) + continue; + sg[0].page = iovs[i].kiov_page; + sg[0].offset = iovs[i].kiov_offset; + sg[0].length = iovs[i].kiov_len; + crypto_hmac_update(tfm, sg, 1); + } + if (khdr) { buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); crypto_hmac_update(tfm, sg, 1); @@ -608,33 +666,48 @@ int krb5_digest_hmac(struct crypto_tfm *tfm, return 0; } +#endif /* HAVE_ASYNC_BLOCK_CIPHER */ + static inline -int krb5_digest_norm(struct crypto_tfm *tfm, +int krb5_digest_norm(struct ll_crypto_hash *tfm, struct krb5_keyblock *kb, struct krb5_header *khdr, int msgcnt, rawobj_t *msgs, + int iovcnt, lnet_kiov_t *iovs, rawobj_t *cksum) { + struct hash_desc desc; struct scatterlist sg[1]; int i; LASSERT(kb->kb_tfm); + desc.tfm = tfm; + desc.flags= 0; - crypto_digest_init(tfm); + ll_crypto_hash_init(&desc); for (i = 0; i < msgcnt; i++) { if (msgs[i].len == 0) continue; buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len); - crypto_digest_update(tfm, sg, 1); + ll_crypto_hash_update(&desc, sg, msgs[i].len); + } + + for (i = 0; i < iovcnt; i++) { + if (iovs[i].kiov_len == 0) + continue; + sg[0].page = iovs[i].kiov_page; + sg[0].offset = iovs[i].kiov_offset; + sg[0].length = iovs[i].kiov_len; + ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len); } if (khdr) { buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); - crypto_digest_update(tfm, sg, 1); + ll_crypto_hash_update(&desc, sg, sizeof(*khdr)); } - crypto_digest_final(tfm, cksum->data); + ll_crypto_hash_final(&desc, cksum->data); return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data, cksum->data, cksum->len); @@ -649,19 +722,20 @@ __s32 krb5_make_checksum(__u32 enctype, struct krb5_keyblock *kb, struct krb5_header *khdr, int msgcnt, rawobj_t *msgs, + int iovcnt, lnet_kiov_t *iovs, rawobj_t *cksum) { - struct krb5_enctype *ke = &enctypes[enctype]; - struct crypto_tfm *tfm; - __u32 code = GSS_S_FAILURE; - int rc; + struct krb5_enctype *ke = &enctypes[enctype]; + struct ll_crypto_hash *tfm; + __u32 code = GSS_S_FAILURE; + int rc; - if (!(tfm = crypto_alloc_tfm(ke->ke_hash_name, 0))) { + if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) { CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name); return GSS_S_FAILURE; } - cksum->len = crypto_tfm_alg_digestsize(tfm); + cksum->len = ll_crypto_hash_digestsize(tfm); OBD_ALLOC(cksum->data, cksum->len); if (!cksum->data) { cksum->len = 0; @@ -670,50 +744,108 @@ __s32 krb5_make_checksum(__u32 enctype, if (ke->ke_hash_hmac) rc = krb5_digest_hmac(tfm, &kb->kb_key, - khdr, msgcnt, msgs, cksum); + khdr, msgcnt, msgs, iovcnt, iovs, cksum); else rc = krb5_digest_norm(tfm, kb, - khdr, msgcnt, msgs, cksum); + khdr, msgcnt, msgs, iovcnt, iovs, cksum); if (rc == 0) code = GSS_S_COMPLETE; out_tfm: - crypto_free_tfm(tfm); + ll_crypto_free_hash(tfm); return code; } +static void fill_krb5_header(struct krb5_ctx *kctx, + struct krb5_header *khdr, + int privacy) +{ + unsigned char acceptor_flag; + + acceptor_flag = kctx->kc_initiate ? 
0 : FLAG_SENDER_IS_ACCEPTOR; + + if (privacy) { + khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG); + khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL; + khdr->kh_ec = cpu_to_be16(0); + khdr->kh_rrc = cpu_to_be16(0); + } else { + khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG); + khdr->kh_flags = acceptor_flag; + khdr->kh_ec = cpu_to_be16(0xffff); + khdr->kh_rrc = cpu_to_be16(0xffff); + } + + khdr->kh_filler = 0xff; + spin_lock(&krb5_seq_lock); + khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); + spin_unlock(&krb5_seq_lock); +} + +static __u32 verify_krb5_header(struct krb5_ctx *kctx, + struct krb5_header *khdr, + int privacy) +{ + unsigned char acceptor_flag; + __u16 tok_id, ec_rrc; + + acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0; + + if (privacy) { + tok_id = KG_TOK_WRAP_MSG; + ec_rrc = 0x0; + } else { + tok_id = KG_TOK_MIC_MSG; + ec_rrc = 0xffff; + } + + /* sanity checks */ + if (be16_to_cpu(khdr->kh_tok_id) != tok_id) { + CERROR("bad token id\n"); + return GSS_S_DEFECTIVE_TOKEN; + } + if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) { + CERROR("bad direction flag\n"); + return GSS_S_BAD_SIG; + } + if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) { + CERROR("missing confidential flag\n"); + return GSS_S_BAD_SIG; + } + if (khdr->kh_filler != 0xff) { + CERROR("bad filler\n"); + return GSS_S_DEFECTIVE_TOKEN; + } + if (be16_to_cpu(khdr->kh_ec) != ec_rrc || + be16_to_cpu(khdr->kh_rrc) != ec_rrc) { + CERROR("bad EC or RRC\n"); + return GSS_S_DEFECTIVE_TOKEN; + } + return GSS_S_COMPLETE; +} + static __u32 gss_get_mic_kerberos(struct gss_ctx *gctx, int msgcnt, rawobj_t *msgs, + int iovcnt, + lnet_kiov_t *iovs, rawobj_t *token) { struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; - unsigned char acceptor_flag; rawobj_t cksum = RAWOBJ_EMPTY; - __u32 rc = GSS_S_FAILURE; - - acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR; /* fill krb5 header */ LASSERT(token->len >= sizeof(*khdr)); khdr = (struct krb5_header *) token->data; - - khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG); - khdr->kh_flags = acceptor_flag; - khdr->kh_filler = 0xff; - khdr->kh_ec = cpu_to_be16(0xffff); - khdr->kh_rrc = cpu_to_be16(0xffff); - spin_lock(&krb5_seq_lock); - khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); - spin_unlock(&krb5_seq_lock); + fill_krb5_header(kctx, khdr, 0); /* checksum */ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, - khdr, msgcnt, msgs, &cksum)) - goto out_err; + khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) + return GSS_S_FAILURE; LASSERT(cksum.len >= ke->ke_hash_size); LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size); @@ -721,26 +853,23 @@ __u32 gss_get_mic_kerberos(struct gss_ctx *gctx, ke->ke_hash_size); token->len = sizeof(*khdr) + ke->ke_hash_size; - rc = GSS_S_COMPLETE; -out_err: rawobj_free(&cksum); - return rc; + return GSS_S_COMPLETE; } static __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx, int msgcnt, rawobj_t *msgs, + int iovcnt, + lnet_kiov_t *iovs, rawobj_t *token) { struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; - unsigned char acceptor_flag; rawobj_t cksum = RAWOBJ_EMPTY; - __u32 rc = GSS_S_FAILURE; - - acceptor_flag = kctx->kc_initiate ? 
FLAG_SENDER_IS_ACCEPTOR : 0; + __u32 major; if (token->len < sizeof(*khdr)) { CERROR("short signature: %u\n", token->len); @@ -749,47 +878,34 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx, khdr = (struct krb5_header *) token->data; - /* sanity checks */ - if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_MIC_MSG) { - CERROR("bad token id\n"); - return GSS_S_DEFECTIVE_TOKEN; - } - if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) { - CERROR("bad direction flag\n"); - return GSS_S_BAD_SIG; - } - if (khdr->kh_filler != 0xff) { - CERROR("bad filler\n"); - return GSS_S_DEFECTIVE_TOKEN; - } - if (be16_to_cpu(khdr->kh_ec) != 0xffff || - be16_to_cpu(khdr->kh_rrc) != 0xffff) { - CERROR("bad EC or RRC\n"); - return GSS_S_DEFECTIVE_TOKEN; + major = verify_krb5_header(kctx, khdr, 0); + if (major != GSS_S_COMPLETE) { + CERROR("bad krb5 header\n"); + return major; } if (token->len < sizeof(*khdr) + ke->ke_hash_size) { CERROR("short signature: %u, require %d\n", token->len, (int) sizeof(*khdr) + ke->ke_hash_size); - goto out; + return GSS_S_FAILURE; } if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, - khdr, msgcnt, msgs, &cksum)) + khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) { + CERROR("failed to make checksum\n"); return GSS_S_FAILURE; + } LASSERT(cksum.len >= ke->ke_hash_size); if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size, ke->ke_hash_size)) { CERROR("checksum mismatch\n"); - rc = GSS_S_BAD_SIG; - goto out; + rawobj_free(&cksum); + return GSS_S_BAD_SIG; } - rc = GSS_S_COMPLETE; -out: rawobj_free(&cksum); - return rc; + return GSS_S_COMPLETE; } static @@ -814,20 +930,24 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize) } static -int krb5_encrypt_rawobjs(struct crypto_tfm *tfm, +int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm, int mode_ecb, int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj, int enc) { - struct scatterlist src, dst; - __u8 local_iv[16] = {0}, *buf; - __u32 datalen = 0; - int i, rc; + struct blkcipher_desc desc; + struct scatterlist src, dst; + __u8 local_iv[16] = {0}, *buf; + __u32 datalen = 0; + int i, rc; ENTRY; buf = outobj->data; + desc.tfm = tfm; + desc.info = local_iv; + desc.flags = 0; for (i = 0; i < inobj_cnt; i++) { LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len); @@ -837,18 +957,18 @@ int krb5_encrypt_rawobjs(struct crypto_tfm *tfm, if (mode_ecb) { if (enc) - rc = crypto_cipher_encrypt( - tfm, &dst, &src, src.length); + rc = ll_crypto_blkcipher_encrypt( + &desc, &dst, &src, src.length); else - rc = crypto_cipher_decrypt( - tfm, &dst, &src, src.length); + rc = ll_crypto_blkcipher_decrypt( + &desc, &dst, &src, src.length); } else { if (enc) - rc = crypto_cipher_encrypt_iv( - tfm, &dst, &src, src.length, local_iv); + rc = ll_crypto_blkcipher_encrypt_iv( + &desc, &dst, &src, src.length); else - rc = crypto_cipher_decrypt_iv( - tfm, &dst, &src, src.length, local_iv); + rc = ll_crypto_blkcipher_decrypt_iv( + &desc, &dst, &src, src.length); } if (rc) { @@ -864,8 +984,238 @@ int krb5_encrypt_rawobjs(struct crypto_tfm *tfm, RETURN(0); } +/* + * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size. 
+ */ +static +int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm, + struct krb5_header *khdr, + char *confounder, + struct ptlrpc_bulk_desc *desc, + rawobj_t *cipher, + int adj_nob) +{ + struct blkcipher_desc ciph_desc; + __u8 local_iv[16] = {0}; + struct scatterlist src, dst; + int blocksize, i, rc, nob = 0; + + LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_enc_iov); + + blocksize = ll_crypto_blkcipher_blocksize(tfm); + LASSERT(blocksize > 1); + LASSERT(cipher->len == blocksize + sizeof(*khdr)); + + ciph_desc.tfm = tfm; + ciph_desc.info = local_iv; + ciph_desc.flags = 0; + + /* encrypt confounder */ + buf_to_sg(&src, confounder, blocksize); + buf_to_sg(&dst, cipher->data, blocksize); + + rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize); + if (rc) { + CERROR("error to encrypt confounder: %d\n", rc); + return rc; + } + + /* encrypt clear pages */ + for (i = 0; i < desc->bd_iov_count; i++) { + src.page = desc->bd_iov[i].kiov_page; + src.offset = desc->bd_iov[i].kiov_offset; + src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) & + (~(blocksize - 1)); + + if (adj_nob) + nob += src.length; + + dst.page = desc->bd_enc_iov[i].kiov_page; + dst.offset = src.offset; + dst.length = src.length; + + desc->bd_enc_iov[i].kiov_offset = dst.offset; + desc->bd_enc_iov[i].kiov_len = dst.length; + + rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, + src.length); + if (rc) { + CERROR("error to encrypt page: %d\n", rc); + return rc; + } + } + + /* encrypt krb5 header */ + buf_to_sg(&src, khdr, sizeof(*khdr)); + buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr)); + + rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, + &dst, &src, sizeof(*khdr)); + if (rc) { + CERROR("error to encrypt krb5 header: %d\n", rc); + return rc; + } + + if (adj_nob) + desc->bd_nob = nob; + + return 0; +} + +/* + * desc->bd_nob_transferred is the size of cipher text received. + * desc->bd_nob is the target size of plain text supposed to be. + * + * if adj_nob != 0, we adjust each page's kiov_len to the actual + * plain text size. + * - for client read: we don't know data size for each page, so + * bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might + * be smaller, so we need to adjust it according to bd_enc_iov[]->kiov_len. + * this means we DO NOT support the situation that server send an odd size + * data in a page which is not the last one. + * - for server write: we knows exactly data size for each page being expected, + * thus kiov_len is accurate already, so we should not adjust it at all. + * and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which + * should have been done by prep_bulk(). 
+ */ +static +int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm, + struct krb5_header *khdr, + struct ptlrpc_bulk_desc *desc, + rawobj_t *cipher, + rawobj_t *plain, + int adj_nob) +{ + struct blkcipher_desc ciph_desc; + __u8 local_iv[16] = {0}; + struct scatterlist src, dst; + int ct_nob = 0, pt_nob = 0; + int blocksize, i, rc; + + LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_enc_iov); + LASSERT(desc->bd_nob_transferred); + + blocksize = ll_crypto_blkcipher_blocksize(tfm); + LASSERT(blocksize > 1); + LASSERT(cipher->len == blocksize + sizeof(*khdr)); + + ciph_desc.tfm = tfm; + ciph_desc.info = local_iv; + ciph_desc.flags = 0; + + if (desc->bd_nob_transferred % blocksize) { + CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred); + return -EPROTO; + } + + /* decrypt head (confounder) */ + buf_to_sg(&src, cipher->data, blocksize); + buf_to_sg(&dst, plain->data, blocksize); + + rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize); + if (rc) { + CERROR("error to decrypt confounder: %d\n", rc); + return rc; + } + + for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred; + i++) { + if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 || + desc->bd_enc_iov[i].kiov_len % blocksize != 0) { + CERROR("page %d: odd offset %u len %u, blocksize %d\n", + i, desc->bd_enc_iov[i].kiov_offset, + desc->bd_enc_iov[i].kiov_len, blocksize); + return -EFAULT; + } + + if (adj_nob) { + if (ct_nob + desc->bd_enc_iov[i].kiov_len > + desc->bd_nob_transferred) + desc->bd_enc_iov[i].kiov_len = + desc->bd_nob_transferred - ct_nob; + + desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len; + if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob) + desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob; + } else { + /* this should be guaranteed by LNET */ + LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <= + desc->bd_nob_transferred); + LASSERT(desc->bd_iov[i].kiov_len <= + desc->bd_enc_iov[i].kiov_len); + } + + if (desc->bd_enc_iov[i].kiov_len == 0) + continue; + + src.page = desc->bd_enc_iov[i].kiov_page; + src.offset = desc->bd_enc_iov[i].kiov_offset; + src.length = desc->bd_enc_iov[i].kiov_len; + + dst = src; + if (desc->bd_iov[i].kiov_len % blocksize == 0) + dst.page = desc->bd_iov[i].kiov_page; + + rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, + src.length); + if (rc) { + CERROR("error to decrypt page: %d\n", rc); + return rc; + } + + if (desc->bd_iov[i].kiov_len % blocksize != 0) { + memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) + + desc->bd_iov[i].kiov_offset, + cfs_page_address(desc->bd_enc_iov[i].kiov_page) + + desc->bd_iov[i].kiov_offset, + desc->bd_iov[i].kiov_len); + } + + ct_nob += desc->bd_enc_iov[i].kiov_len; + pt_nob += desc->bd_iov[i].kiov_len; + } + + if (unlikely(ct_nob != desc->bd_nob_transferred)) { + CERROR("%d cipher text transferred but only %d decrypted\n", + desc->bd_nob_transferred, ct_nob); + return -EFAULT; + } + + if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) { + CERROR("%d plain text expected but only %d received\n", + desc->bd_nob, pt_nob); + return -EFAULT; + } + + /* if needed, clear up the rest unused iovs */ + if (adj_nob) + while (i < desc->bd_iov_count) + desc->bd_iov[i++].kiov_len = 0; + + /* decrypt tail (krb5 header) */ + buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr)); + buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr)); + + rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, + &dst, &src, sizeof(*khdr)); + if (rc) { + CERROR("error to decrypt tail: %d\n", rc); + return rc; + } + + if 
(memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) { + CERROR("krb5 header doesn't match\n"); + return -EACCES; + } + + return 0; +} + static __u32 gss_wrap_kerberos(struct gss_ctx *gctx, + rawobj_t *gsshdr, rawobj_t *msg, int msg_buflen, rawobj_t *token) @@ -873,46 +1223,41 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; - unsigned char acceptor_flag; int blocksize; rawobj_t cksum = RAWOBJ_EMPTY; rawobj_t data_desc[3], cipher; __u8 conf[GSS_MAX_CIPHER_BLOCK]; - int enc_rc = 0; + int rc = 0; LASSERT(ke); LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); LASSERT(kctx->kc_keye.kb_tfm == NULL || ke->ke_conf_size >= - crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm)); + ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm)); - acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR; + /* + * final token format: + * --------------------------------------------------- + * | krb5 header | cipher text | checksum (16 bytes) | + * --------------------------------------------------- + */ /* fill krb5 header */ LASSERT(token->len >= sizeof(*khdr)); khdr = (struct krb5_header *) token->data; - - khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG); - khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL; - khdr->kh_filler = 0xff; - khdr->kh_ec = cpu_to_be16(0); - khdr->kh_rrc = cpu_to_be16(0); - spin_lock(&krb5_seq_lock); - khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); - spin_unlock(&krb5_seq_lock); + fill_krb5_header(kctx, khdr, 1); /* generate confounder */ get_random_bytes(conf, ke->ke_conf_size); /* get encryption blocksize. note kc_keye might not associated with - * a tfm, currently only for arcfour-hmac - */ + * a tfm, currently only for arcfour-hmac */ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { LASSERT(kctx->kc_keye.kb_tfm == NULL); blocksize = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm); + blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); } LASSERT(blocksize <= ke->ke_conf_size); @@ -921,7 +1266,26 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, return GSS_S_FAILURE; /* - * clear text layout, same for both checksum & encryption: + * clear text layout for checksum: + * ------------------------------------------------------ + * | confounder | gss header | clear msgs | krb5 header | + * ------------------------------------------------------ + */ + data_desc[0].data = conf; + data_desc[0].len = ke->ke_conf_size; + data_desc[1].data = gsshdr->data; + data_desc[1].len = gsshdr->len; + data_desc[2].data = msg->data; + data_desc[2].len = msg->len; + + /* compute checksum */ + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 3, data_desc, 0, NULL, &cksum)) + return GSS_S_FAILURE; + LASSERT(cksum.len >= ke->ke_hash_size); + + /* + * clear text layout for encryption: * ----------------------------------------- * | confounder | clear msgs | krb5 header | * ----------------------------------------- @@ -933,54 +1297,192 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, data_desc[2].data = (__u8 *) khdr; data_desc[2].len = sizeof(*khdr); - /* compute checksum */ - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, - khdr, 3, data_desc, &cksum)) - return GSS_S_FAILURE; - LASSERT(cksum.len >= ke->ke_hash_size); - - /* encrypting, cipher text will be directly inplace */ + /* cipher text will be directly inplace */ cipher.data = (__u8 *) (khdr + 1); cipher.len = token->len - 
sizeof(*khdr); LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr)); if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - rawobj_t arc4_keye; - struct crypto_tfm *arc4_tfm; + rawobj_t arc4_keye; + struct ll_crypto_cipher *arc4_tfm; if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, - NULL, 1, &cksum, &arc4_keye)) { + NULL, 1, &cksum, 0, NULL, &arc4_keye)) { CERROR("failed to obtain arc4 enc key\n"); - GOTO(arc4_out, enc_rc = -EACCES); + GOTO(arc4_out, rc = -EACCES); } - arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB); + arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0); if (arc4_tfm == NULL) { CERROR("failed to alloc tfm arc4 in ECB mode\n"); - GOTO(arc4_out_key, enc_rc = -EACCES); + GOTO(arc4_out_key, rc = -EACCES); } - if (crypto_cipher_setkey(arc4_tfm, - arc4_keye.data, arc4_keye.len)) { + if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data, + arc4_keye.len)) { CERROR("failed to set arc4 key, len %d\n", arc4_keye.len); - GOTO(arc4_out_tfm, enc_rc = -EACCES); + GOTO(arc4_out_tfm, rc = -EACCES); } - enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1, - 3, data_desc, &cipher, 1); + rc = krb5_encrypt_rawobjs(arc4_tfm, 1, + 3, data_desc, &cipher, 1); arc4_out_tfm: - crypto_free_tfm(arc4_tfm); + ll_crypto_free_blkcipher(arc4_tfm); arc4_out_key: rawobj_free(&arc4_keye); arc4_out: do {} while(0); /* just to avoid compile warning */ } else { - enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0, - 3, data_desc, &cipher, 1); + rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0, + 3, data_desc, &cipher, 1); + } + + if (rc != 0) { + rawobj_free(&cksum); + return GSS_S_FAILURE; + } + + /* fill in checksum */ + LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size); + memcpy((char *)(khdr + 1) + cipher.len, + cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size); + rawobj_free(&cksum); + + /* final token length */ + token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; + return GSS_S_COMPLETE; +} + +static +__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx, + struct ptlrpc_bulk_desc *desc) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + int blocksize, i; + + LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_enc_iov); + LASSERT(kctx->kc_keye.kb_tfm); + + blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + + for (i = 0; i < desc->bd_iov_count; i++) { + LASSERT(desc->bd_enc_iov[i].kiov_page); + /* + * offset should always start at page boundary of either + * client or server side. 
+ */ + if (desc->bd_iov[i].kiov_offset & blocksize) { + CERROR("odd offset %d in page %d\n", + desc->bd_iov[i].kiov_offset, i); + return GSS_S_FAILURE; + } + + desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset; + desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len + + blocksize - 1) & (~(blocksize - 1)); + } + + return GSS_S_COMPLETE; +} + +static +__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, + struct ptlrpc_bulk_desc *desc, + rawobj_t *token, int adj_nob) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + int blocksize; + rawobj_t cksum = RAWOBJ_EMPTY; + rawobj_t data_desc[1], cipher; + __u8 conf[GSS_MAX_CIPHER_BLOCK]; + int rc = 0; + + LASSERT(ke); + LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); + + /* + * final token format: + * -------------------------------------------------- + * | krb5 header | head/tail cipher text | checksum | + * -------------------------------------------------- + */ + + /* fill krb5 header */ + LASSERT(token->len >= sizeof(*khdr)); + khdr = (struct krb5_header *) token->data; + fill_krb5_header(kctx, khdr, 1); + + /* generate confounder */ + get_random_bytes(conf, ke->ke_conf_size); + + /* get encryption blocksize. note kc_keye might not associated with + * a tfm, currently only for arcfour-hmac */ + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LASSERT(kctx->kc_keye.kb_tfm == NULL); + blocksize = 1; + } else { + LASSERT(kctx->kc_keye.kb_tfm); + blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + } + + /* + * we assume the size of krb5_header (16 bytes) must be n * blocksize. + * the bulk token size would be exactly (sizeof(krb5_header) + + * blocksize + sizeof(krb5_header) + hashsize) + */ + LASSERT(blocksize <= ke->ke_conf_size); + LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0); + LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16); + + /* + * clear text layout for checksum: + * ------------------------------------------ + * | confounder | clear pages | krb5 header | + * ------------------------------------------ + */ + data_desc[0].data = conf; + data_desc[0].len = ke->ke_conf_size; + + /* compute checksum */ + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 1, data_desc, + desc->bd_iov_count, desc->bd_iov, + &cksum)) + return GSS_S_FAILURE; + LASSERT(cksum.len >= ke->ke_hash_size); + + /* + * clear text layout for encryption: + * ------------------------------------------ + * | confounder | clear pages | krb5 header | + * ------------------------------------------ + * | | | + * ---------- (cipher pages) | + * result token: | | + * ------------------------------------------- + * | krb5 header | cipher text | cipher text | + * ------------------------------------------- + */ + data_desc[0].data = conf; + data_desc[0].len = ke->ke_conf_size; + + cipher.data = (__u8 *) (khdr + 1); + cipher.len = blocksize + sizeof(*khdr); + + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LBUG(); + rc = 0; + } else { + rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr, + conf, desc, &cipher, adj_nob); } - if (enc_rc != 0) { + if (rc != 0) { rawobj_free(&cksum); return GSS_S_FAILURE; } @@ -999,23 +1501,23 @@ arc4_out: static __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, + rawobj_t *gsshdr, rawobj_t *token, rawobj_t *msg) { struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; - unsigned char acceptor_flag; 
unsigned char *tmpbuf; int blocksize, bodysize; rawobj_t cksum = RAWOBJ_EMPTY; rawobj_t cipher_in, plain_out; - __u32 rc = GSS_S_FAILURE, enc_rc = 0; + rawobj_t hash_objs[3]; + int rc = 0; + __u32 major; LASSERT(ke); - acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0; - if (token->len < sizeof(*khdr)) { CERROR("short signature: %u\n", token->len); return GSS_S_DEFECTIVE_TOKEN; @@ -1023,27 +1525,10 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, khdr = (struct krb5_header *) token->data; - /* sanity check header */ - if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_WRAP_MSG) { - CERROR("bad token id\n"); - return GSS_S_DEFECTIVE_TOKEN; - } - if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) { - CERROR("bad direction flag\n"); - return GSS_S_BAD_SIG; - } - if ((khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) { - CERROR("missing confidential flag\n"); - return GSS_S_BAD_SIG; - } - if (khdr->kh_filler != 0xff) { - CERROR("bad filler\n"); - return GSS_S_DEFECTIVE_TOKEN; - } - if (be16_to_cpu(khdr->kh_ec) != 0x0 || - be16_to_cpu(khdr->kh_rrc) != 0x0) { - CERROR("bad EC or RRC\n"); - return GSS_S_DEFECTIVE_TOKEN; + major = verify_krb5_header(kctx, khdr, 1); + if (major != GSS_S_COMPLETE) { + CERROR("bad krb5 header\n"); + return major; } /* block size */ @@ -1052,7 +1537,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, blocksize = 1; } else { LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm); + blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); } /* expected token layout: @@ -1083,51 +1568,53 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, if (!tmpbuf) return GSS_S_FAILURE; + major = GSS_S_FAILURE; + cipher_in.data = (__u8 *) (khdr + 1); cipher_in.len = bodysize; plain_out.data = tmpbuf; plain_out.len = bodysize; if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - rawobj_t arc4_keye; - struct crypto_tfm *arc4_tfm; + rawobj_t arc4_keye; + struct ll_crypto_cipher *arc4_tfm; cksum.data = token->data + token->len - ke->ke_hash_size; cksum.len = ke->ke_hash_size; if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, - NULL, 1, &cksum, &arc4_keye)) { + NULL, 1, &cksum, 0, NULL, &arc4_keye)) { CERROR("failed to obtain arc4 enc key\n"); - GOTO(arc4_out, enc_rc = -EACCES); + GOTO(arc4_out, rc = -EACCES); } - arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB); + arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0); if (arc4_tfm == NULL) { CERROR("failed to alloc tfm arc4 in ECB mode\n"); - GOTO(arc4_out_key, enc_rc = -EACCES); + GOTO(arc4_out_key, rc = -EACCES); } - if (crypto_cipher_setkey(arc4_tfm, + if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data, arc4_keye.len)) { CERROR("failed to set arc4 key, len %d\n", arc4_keye.len); - GOTO(arc4_out_tfm, enc_rc = -EACCES); + GOTO(arc4_out_tfm, rc = -EACCES); } - enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1, - 1, &cipher_in, &plain_out, 0); + rc = krb5_encrypt_rawobjs(arc4_tfm, 1, + 1, &cipher_in, &plain_out, 0); arc4_out_tfm: - crypto_free_tfm(arc4_tfm); + ll_crypto_free_blkcipher(arc4_tfm); arc4_out_key: rawobj_free(&arc4_keye); arc4_out: cksum = RAWOBJ_EMPTY; } else { - enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0, - 1, &cipher_in, &plain_out, 0); + rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0, + 1, &cipher_in, &plain_out, 0); } - if (enc_rc != 0) { + if (rc != 0) { CERROR("error decrypt\n"); goto out_free; } @@ -1139,51 +1626,135 @@ arc4_out: * ----------------------------------------- */ - /* last part must be identical to the krb5 header 
*/ + /* verify krb5 header in token is not modified */ if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr), sizeof(*khdr))) { - CERROR("decrypted header mismatch\n"); + CERROR("decrypted krb5 header mismatch\n"); goto out_free; } - /* verify checksum */ + /* verify checksum, compose clear text as layout: + * ------------------------------------------------------ + * | confounder | gss header | clear msgs | krb5 header | + * ------------------------------------------------------ + */ + hash_objs[0].len = ke->ke_conf_size; + hash_objs[0].data = plain_out.data; + hash_objs[1].len = gsshdr->len; + hash_objs[1].data = gsshdr->data; + hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr); + hash_objs[2].data = plain_out.data + ke->ke_conf_size; if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, - khdr, 1, &plain_out, &cksum)) + khdr, 3, hash_objs, 0, NULL, &cksum)) goto out_free; LASSERT(cksum.len >= ke->ke_hash_size); if (memcmp((char *)(khdr + 1) + bodysize, cksum.data + cksum.len - ke->ke_hash_size, ke->ke_hash_size)) { - CERROR("cksum mismatch\n"); + CERROR("checksum mismatch\n"); goto out_free; } msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr); memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len); - rc = GSS_S_COMPLETE; + major = GSS_S_COMPLETE; out_free: OBD_FREE(tmpbuf, bodysize); rawobj_free(&cksum); - return rc; + return major; } static -__u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx, - int length, - void *in_buf, - void *out_buf) +__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, + struct ptlrpc_bulk_desc *desc, + rawobj_t *token, int adj_nob) { - struct krb5_ctx *kctx = ctx->internal_ctx_id; - __u32 rc; + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + int blocksize; + rawobj_t cksum = RAWOBJ_EMPTY; + rawobj_t cipher, plain; + rawobj_t data_desc[1]; + int rc; + __u32 major; + + LASSERT(ke); - rc = krb5_encrypt(kctx->kc_keye.kb_tfm, 0, - NULL, in_buf, out_buf, length); + if (token->len < sizeof(*khdr)) { + CERROR("short signature: %u\n", token->len); + return GSS_S_DEFECTIVE_TOKEN; + } + + khdr = (struct krb5_header *) token->data; + + major = verify_krb5_header(kctx, khdr, 1); + if (major != GSS_S_COMPLETE) { + CERROR("bad krb5 header\n"); + return major; + } + + /* block size */ + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LASSERT(kctx->kc_keye.kb_tfm == NULL); + blocksize = 1; + LBUG(); + } else { + LASSERT(kctx->kc_keye.kb_tfm); + blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm); + } + LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0); + + /* + * token format is expected as: + * ----------------------------------------------- + * | krb5 header | head/tail cipher text | cksum | + * ----------------------------------------------- + */ + if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) + + ke->ke_hash_size) { + CERROR("short token size: %u\n", token->len); + return GSS_S_DEFECTIVE_TOKEN; + } + + cipher.data = (__u8 *) (khdr + 1); + cipher.len = blocksize + sizeof(*khdr); + plain.data = cipher.data; + plain.len = cipher.len; + + rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr, + desc, &cipher, &plain, adj_nob); if (rc) - CERROR("plain encrypt error: %d\n", rc); + return GSS_S_DEFECTIVE_TOKEN; + + /* + * verify checksum, compose clear text as layout: + * ------------------------------------------ + * | confounder | clear pages | krb5 header | + * ------------------------------------------ + */ + 
data_desc[0].data = plain.data; + data_desc[0].len = blocksize; + + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 1, data_desc, + desc->bd_iov_count, desc->bd_iov, + &cksum)) + return GSS_S_FAILURE; + LASSERT(cksum.len >= ke->ke_hash_size); - return rc; + if (memcmp(plain.data + blocksize + sizeof(*khdr), + cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size)) { + CERROR("checksum mismatch\n"); + rawobj_free(&cksum); + return GSS_S_BAD_SIG; + } + + rawobj_free(&cksum); + return GSS_S_COMPLETE; } int gss_display_kerberos(struct gss_ctx *ctx, @@ -1193,10 +1764,8 @@ int gss_display_kerberos(struct gss_ctx *ctx, struct krb5_ctx *kctx = ctx->internal_ctx_id; int written; - written = snprintf(buf, bufsize, - " mech: krb5\n" - " enctype: %s\n", - enctype2str(kctx->kc_enctype)); + written = snprintf(buf, bufsize, "krb5 (%s)", + enctype2str(kctx->kc_enctype)); return written; } @@ -1208,22 +1777,30 @@ static struct gss_api_ops gss_kerberos_ops = { .gss_verify_mic = gss_verify_mic_kerberos, .gss_wrap = gss_wrap_kerberos, .gss_unwrap = gss_unwrap_kerberos, - .gss_plain_encrypt = gss_plain_encrypt_kerberos, + .gss_prep_bulk = gss_prep_bulk_kerberos, + .gss_wrap_bulk = gss_wrap_bulk_kerberos, + .gss_unwrap_bulk = gss_unwrap_bulk_kerberos, .gss_delete_sec_context = gss_delete_sec_context_kerberos, .gss_display = gss_display_kerberos, }; static struct subflavor_desc gss_kerberos_sfs[] = { { - .sf_subflavor = SPTLRPC_SUBFLVR_KRB5, + .sf_subflavor = SPTLRPC_SUBFLVR_KRB5N, .sf_qop = 0, - .sf_service = SPTLRPC_SVC_NONE, - .sf_name = "krb5" + .sf_service = SPTLRPC_SVC_NULL, + .sf_name = "krb5n" }, { - .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I, + .sf_subflavor = SPTLRPC_SUBFLVR_KRB5A, .sf_qop = 0, .sf_service = SPTLRPC_SVC_AUTH, + .sf_name = "krb5a" + }, + { + .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I, + .sf_qop = 0, + .sf_service = SPTLRPC_SVC_INTG, .sf_name = "krb5i" }, { @@ -1243,7 +1820,7 @@ static struct gss_api_mech gss_kerberos_mech = { .gm_oid = (rawobj_t) {9, "\052\206\110\206\367\022\001\002\002"}, .gm_ops = &gss_kerberos_ops, - .gm_sf_num = 3, + .gm_sf_num = 4, .gm_sfs = gss_kerberos_sfs, }; @@ -1251,6 +1828,8 @@ int __init init_kerberos_module(void) { int status; + spin_lock_init(&krb5_seq_lock); + status = lgss_mech_register(&gss_kerberos_mech); if (status) CERROR("Failed to register kerberos gss mechanism!\n");
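
The hunks above replace the old crypto_alloc_tfm()/crypto_cipher_*()/crypto_digest_*() calls with the ll_crypto_blkcipher_*() and ll_crypto_hash_*() wrappers and switch the enctype table to template-style algorithm names such as "cbc(aes)", "cbc(des3_ede)" and "hmac(sha1)". The sketch below shows the calling convention this targets, assuming the ll_crypto_* wrappers map one-to-one onto the blkcipher API of 2.6.19+ kernels; demo_cbc_encrypt() and its arguments are made up for illustration and are not part of the patch.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes the
 * ll_crypto_* wrappers used above are thin aliases for the kernel
 * blkcipher API (crypto_alloc_blkcipher() and friends); the helper
 * name and parameters are hypothetical.
 */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_cbc_encrypt(const u8 *key, unsigned int keylen,
                            void *buf, unsigned int len)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        u8 iv[16] = { 0 };
        int rc;

        /* same template-style naming the patch switches to */
        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_blkcipher_setkey(tfm, key, keylen);
        if (rc)
                goto out;

        /* length must be a multiple of the cipher block size */
        if (len % crypto_blkcipher_blocksize(tfm)) {
                rc = -EINVAL;
                goto out;
        }

        desc.tfm   = tfm;
        desc.info  = iv;        /* IV travels in the desc, not as an argument */
        desc.flags = 0;

        /* encrypt in place, as krb5_encrypt() does above */
        sg_init_one(&sg, buf, len);
        rc = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
out:
        crypto_free_blkcipher(tfm);
        return rc;
}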
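
The new bulk path (gss_prep_bulk_kerberos(), krb5_encrypt_bulk(), krb5_decrypt_bulk()) rounds every page fragment up to the cipher block size into bd_enc_iov[] and keeps only the encrypted confounder, an encrypted copy of the krb5 header and the checksum in the security token, while the page payload is encrypted in place. Below is a stand-alone sketch of those sizing rules; the helper names are made up, and the aes128-cts figures (16-byte blocks, 12-byte checksum) are taken from the enctype table above.

/* Plain C sketch of the bulk sizing rules -- illustrative only. */
#include <stdio.h>

/* round a fragment up to the cipher block size, as prep_bulk() does
 * for each bd_enc_iov[] entry */
static unsigned int round_to_blocksize(unsigned int len, unsigned int blocksize)
{
        return (len + blocksize - 1) & ~(blocksize - 1);
}

/* bulk security token: krb5 header | encrypted confounder |
 * encrypted copy of the header | checksum */
static unsigned int bulk_token_size(unsigned int khdr_size,
                                    unsigned int blocksize,
                                    unsigned int hash_size)
{
        return khdr_size + blocksize + khdr_size + hash_size;
}

int main(void)
{
        /* e.g. aes128-cts-hmac-sha1-96: 16-byte blocks, 12-byte checksum */
        printf("4096-byte page     -> %u cipher bytes\n",
               round_to_blocksize(4096, 16));
        printf("1000-byte fragment -> %u cipher bytes\n",
               round_to_blocksize(1000, 16));
        printf("bulk token         -> %u bytes\n",
               bulk_token_size(16, 16, 12));
        return 0;
}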