X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_krb5_mech.c;h=09895ba43487e9e2f2a31d5ffed9ba98dbd44a42;hb=671c1b0c705640d63a1d3be7016c79afd10bc8df;hp=3b7da5c23a5a90bb0e78bbfe7765ca26ac3ef789;hpb=6a5b0b1ce3bbf5be2a8babba8619b95346edb24f;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 3b7da5c..09895ba 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -1,9 +1,10 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * Modifications for Lustre
- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ *
  * Author: Eric Mei
  */
@@ -47,20 +48,12 @@
  *
  */

-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include
 #include
 #include
 #include
-#include
 #include
-#else
-#include
-#endif

 #include
 #include
@@ -76,7 +69,7 @@
 #include "gss_asn1.h"
 #include "gss_krb5.h"

-spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t krb5_seq_lock;

 struct krb5_enctype {
         char *ke_dispname;
@@ -154,14 +147,14 @@ static const char * enctype2str(__u32 enctype)
 static
 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 {
-        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
-        if (kb->kb_tfm == NULL) {
-                CERROR("failed to alloc tfm: %s, mode %d\n",
-                       alg_name, alg_mode);
-                return -1;
-        }
-
-        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+        kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+        if (IS_ERR(kb->kb_tfm)) {
+                CERROR("failed to alloc tfm: %s, mode %d\n",
+                       alg_name, alg_mode);
+                return -1;
+        }
+
+        if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                 CERROR("failed to set %s key, len %d\n",
                        alg_name, kb->kb_key.len);
                 return -1;
@@ -204,7 +197,7 @@ void keyblock_free(struct krb5_keyblock *kb)
 {
         rawobj_free(&kb->kb_key);
         if (kb->kb_tfm)
-                ll_crypto_free_blkcipher(kb->kb_tfm);
+                crypto_free_blkcipher(kb->kb_tfm);
 }

 static
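Note: the keyblock_init() hunk above is more than an API rename. Unlike the
old ll_crypto wrapper, crypto_alloc_blkcipher() reports failure through an
ERR_PTR()-encoded pointer and never returns NULL, so the NULL test it replaces
could let an error pointer escape into later calls. A minimal sketch of the
required pattern -- illustrative only; "cbc(aes)" is an arbitrary algorithm
name, not one taken from this patch:

        /* sketch: legacy blkcipher allocation must be checked with IS_ERR() */
        static struct crypto_blkcipher *demo_alloc_tfm(void)
        {
                struct crypto_blkcipher *tfm;

                tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
                if (IS_ERR(tfm))
                        return NULL;    /* map ERR_PTR() to plain failure */
                return tfm;
        }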
@@ -240,7 +233,7 @@ int get_rawobj(char **ptr, const char *end, rawobj_t *res)
         if (q > end || q < p)
                 return -1;

-        OBD_ALLOC(res->data, len);
+        OBD_ALLOC_LARGE(res->data, len);
         if (!res->data)
                 return -1;
@@ -256,12 +249,12 @@ int get_keyblock(char **ptr, const char *end,
 {
         char *buf;

-        OBD_ALLOC(buf, keysize);
+        OBD_ALLOC_LARGE(buf, keysize);
         if (buf == NULL)
                 return -1;

         if (get_bytes(ptr, end, buf, keysize)) {
-                OBD_FREE(buf, keysize);
+                OBD_FREE_LARGE(buf, keysize);
                 return -1;
         }
@@ -348,7 +341,7 @@ __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
         if (p != end)
                 goto out_err;

-        CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
+        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
         return 0;
 out_err:
         return GSS_S_FAILURE;
@@ -410,7 +403,7 @@ __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
         if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                 goto out_err;

-        CDEBUG(D_SEC, "succesfully imported v2 context\n");
+        CDEBUG(D_SEC, "successfully imported v2 context\n");
         return 0;
 out_err:
         return GSS_S_FAILURE;
@@ -501,7 +494,7 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                 goto out_err;

         gctx_new->internal_ctx_id = knew;
-        CDEBUG(D_SEC, "succesfully copied reverse context\n");
+        CDEBUG(D_SEC, "successfully copied reverse context\n");

         return GSS_S_COMPLETE;
 out_err:
@@ -530,15 +523,14 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
 }

 static
-void buf_to_sg(struct scatterlist *sg, char *ptr, int len)
+void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 {
-        sg->page = virt_to_page(ptr);
-        sg->offset = offset_in_page(ptr);
-        sg->length = len;
+        sg_init_table(sg, 1);
+        sg_set_buf(sg, ptr, len);
 }

 static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
                    int decrypt,
                    void * iv,
                    void * in,
@@ -555,93 +547,81 @@ __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
         desc.info = local_iv;
         desc.flags= 0;

-        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+        if (length % crypto_blkcipher_blocksize(tfm) != 0) {
                 CERROR("output length %d mismatch blocksize %d\n",
-                       length, ll_crypto_blkcipher_blocksize(tfm));
+                       length, crypto_blkcipher_blocksize(tfm));
                 goto out;
         }

-        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
-                CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+        if (crypto_blkcipher_ivsize(tfm) > 16) {
+                CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
                 goto out;
         }

         if (iv)
-                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

         memcpy(out, in, length);
         buf_to_sg(&sg, out, length);

         if (decrypt)
-                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+                ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
         else
-                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+                ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);

 out:
         return(ret);
 }

 static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
                      rawobj_t *key,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
+                     int iovcnt, lnet_kiov_t *iovs,
                      rawobj_t *cksum)
-#ifdef HAVE_ASYNC_BLOCK_CIPHER
 {
         struct hash_desc desc;
         struct scatterlist sg[1];
         int i;

-        ll_crypto_hash_setkey(tfm, key->data, key->len);
+        crypto_hash_setkey(tfm, key->data, key->len);
         desc.tfm = tfm;
         desc.flags= 0;

-        ll_crypto_hash_init(&desc);
+        crypto_hash_init(&desc);

         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
-        }
-
-        if (khdr) {
-                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+                crypto_hash_update(&desc, sg, msgs[i].len);
         }

-        return ll_crypto_hash_final(&desc, cksum->data);
-}
-#else /* HAVE_ASYNC_BLOCK_CIPHER */
-{
-        struct scatterlist sg[1];
-        __u32 keylen = key->len, i;
-
-        crypto_hmac_init(tfm, key->data, &keylen);
-
-        for (i = 0; i < msgcnt; i++) {
-                if (msgs[i].len == 0)
+        for (i = 0; i < iovcnt; i++) {
+                if (iovs[i].kiov_len == 0)
                         continue;
-                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                crypto_hmac_update(tfm, sg, 1);
+
+                sg_init_table(sg, 1);
+                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                            iovs[i].kiov_offset);
+                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }

         if (khdr) {
                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                crypto_hmac_update(tfm, sg, 1);
+                crypto_hash_update(&desc, sg, sizeof(*khdr));
         }

-        crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
-        return 0;
+        return crypto_hash_final(&desc, cksum->data);
 }
-#endif /* HAVE_ASYNC_BLOCK_CIPHER */
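Note: the reworked krb5_digest_hmac() above drops the crypto_hmac_*() fallback,
standardizes on the hash_desc-based kernel API, and now also hashes bulk pages
handed over as lnet_kiov_t entries. A minimal detached sketch of that legacy
HMAC calling sequence over one flat buffer -- illustrative only, assuming a tfm
obtained with crypto_alloc_hash() for an "hmac(...)" template algorithm:

        /* sketch: one-buffer HMAC with the legacy crypto_hash API */
        static int demo_hmac(struct crypto_hash *tfm, rawobj_t *key,
                             void *buf, unsigned int len, __u8 *out)
        {
                struct hash_desc desc = { .tfm = tfm, .flags = 0 };
                struct scatterlist sg[1];
                int rc;

                rc = crypto_hash_setkey(tfm, key->data, key->len);
                if (rc)
                        return rc;
                rc = crypto_hash_init(&desc);
                if (rc)
                        return rc;
                buf_to_sg(sg, buf, len);        /* file-local helper above */
                rc = crypto_hash_update(&desc, sg, len);
                if (rc)
                        return rc;
                return crypto_hash_final(&desc, out);  /* digestsize bytes */
        }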
 static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
                      struct krb5_keyblock *kb,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
+                     int iovcnt, lnet_kiov_t *iovs,
                      rawobj_t *cksum)
 {
         struct hash_desc desc;
@@ -652,21 +632,31 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
         desc.tfm = tfm;
         desc.flags= 0;

-        ll_crypto_hash_init(&desc);
+        crypto_hash_init(&desc);

         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
+                crypto_hash_update(&desc, sg, msgs[i].len);
+        }
+
+        for (i = 0; i < iovcnt; i++) {
+                if (iovs[i].kiov_len == 0)
+                        continue;
+
+                sg_init_table(sg, 1);
+                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                            iovs[i].kiov_offset);
+                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }

         if (khdr) {
                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+                crypto_hash_update(&desc, sg, sizeof(*khdr));
         }

-        ll_crypto_hash_final(&desc, cksum->data);
+        crypto_hash_final(&desc, cksum->data);

         return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                             cksum->data, cksum->len);
@@ -681,20 +671,21 @@ __s32 krb5_make_checksum(__u32 enctype,
                          struct krb5_keyblock *kb,
                          struct krb5_header *khdr,
                          int msgcnt, rawobj_t *msgs,
+                         int iovcnt, lnet_kiov_t *iovs,
                          rawobj_t *cksum)
 {
         struct krb5_enctype *ke = &enctypes[enctype];
-        struct ll_crypto_hash *tfm;
+        struct crypto_hash *tfm;
         __u32 code = GSS_S_FAILURE;
         int rc;

-        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+        if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                 return GSS_S_FAILURE;
         }

-        cksum->len = ll_crypto_hash_digestsize(tfm);
-        OBD_ALLOC(cksum->data, cksum->len);
+        cksum->len = crypto_hash_digestsize(tfm);
+        OBD_ALLOC_LARGE(cksum->data, cksum->len);
         if (!cksum->data) {
                 cksum->len = 0;
                 goto out_tfm;
@@ -702,50 +693,108 @@ __s32 krb5_make_checksum(__u32 enctype,

         if (ke->ke_hash_hmac)
                 rc = krb5_digest_hmac(tfm, &kb->kb_key,
-                                      khdr, msgcnt, msgs, cksum);
+                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
         else
                 rc = krb5_digest_norm(tfm, kb,
-                                      khdr, msgcnt, msgs, cksum);
+                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);

         if (rc == 0)
                 code = GSS_S_COMPLETE;
 out_tfm:
-        ll_crypto_free_hash(tfm);
+        crypto_free_hash(tfm);
         return code;
 }

+static void fill_krb5_header(struct krb5_ctx *kctx,
+                             struct krb5_header *khdr,
+                             int privacy)
+{
+        unsigned char acceptor_flag;
+
+        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
+
+        if (privacy) {
+                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
+                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
+                khdr->kh_ec = cpu_to_be16(0);
+                khdr->kh_rrc = cpu_to_be16(0);
+        } else {
+                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
+                khdr->kh_flags = acceptor_flag;
+                khdr->kh_ec = cpu_to_be16(0xffff);
+                khdr->kh_rrc = cpu_to_be16(0xffff);
+        }
+
+        khdr->kh_filler = 0xff;
+        spin_lock(&krb5_seq_lock);
+        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+        spin_unlock(&krb5_seq_lock);
+}
+
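Note: fill_krb5_header() above and its counterpart verify_krb5_header() below
operate on the 16-byte per-message token header of RFC 4121 (section 4.2.6).
The layout behind the kh_* fields, shown for reference:

        /*
         * RFC 4121 token header, 16 bytes, multi-byte fields big-endian:
         *
         *   octet 0..1   kh_tok_id   0x0404 = MIC token, 0x0504 = wrap token
         *   octet 2      kh_flags    0x01 sender-is-acceptor,
         *                            0x02 sealed (FLAG_WRAP_CONFIDENTIAL),
         *                            0x04 acceptor-subkey
         *   octet 3      kh_filler   always 0xff
         *   octet 4..5   kh_ec       extra count (0xffff in MIC tokens)
         *   octet 6..7   kh_rrc      right rotation count (0xffff in MIC)
         *   octet 8..15  kh_seq      64-bit send sequence number
         */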
+static __u32 verify_krb5_header(struct krb5_ctx *kctx,
+                                struct krb5_header *khdr,
+                                int privacy)
+{
+        unsigned char acceptor_flag;
+        __u16 tok_id, ec_rrc;
+
+        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
+
+        if (privacy) {
+                tok_id = KG_TOK_WRAP_MSG;
+                ec_rrc = 0x0;
+        } else {
+                tok_id = KG_TOK_MIC_MSG;
+                ec_rrc = 0xffff;
+        }
+
+        /* sanity checks */
+        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
+                CERROR("bad token id\n");
+                return GSS_S_DEFECTIVE_TOKEN;
+        }
+        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
+                CERROR("bad direction flag\n");
+                return GSS_S_BAD_SIG;
+        }
+        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
+                CERROR("missing confidential flag\n");
+                return GSS_S_BAD_SIG;
+        }
+        if (khdr->kh_filler != 0xff) {
+                CERROR("bad filler\n");
+                return GSS_S_DEFECTIVE_TOKEN;
+        }
+        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
+            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
+                CERROR("bad EC or RRC\n");
+                return GSS_S_DEFECTIVE_TOKEN;
+        }
+        return GSS_S_COMPLETE;
+}
+
 static
 __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                            int msgcnt,
                            rawobj_t *msgs,
+                           int iovcnt,
+                           lnet_kiov_t *iovs,
                            rawobj_t *token)
 {
         struct krb5_ctx *kctx = gctx->internal_ctx_id;
         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
         struct krb5_header *khdr;
-        unsigned char acceptor_flag;
         rawobj_t cksum = RAWOBJ_EMPTY;
-        __u32 rc = GSS_S_FAILURE;
-
-        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

         /* fill krb5 header */
         LASSERT(token->len >= sizeof(*khdr));
         khdr = (struct krb5_header *) token->data;
-
-        khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
-        khdr->kh_flags = acceptor_flag;
-        khdr->kh_filler = 0xff;
-        khdr->kh_ec = cpu_to_be16(0xffff);
-        khdr->kh_rrc = cpu_to_be16(0xffff);
-        spin_lock(&krb5_seq_lock);
-        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        spin_unlock(&krb5_seq_lock);
+        fill_krb5_header(kctx, khdr, 0);

         /* checksum */
         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
-                               khdr, msgcnt, msgs, &cksum))
-                goto out_err;
+                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
+                return GSS_S_FAILURE;

         LASSERT(cksum.len >= ke->ke_hash_size);
         LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
@@ -753,26 +802,23 @@ __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                ke->ke_hash_size);

         token->len = sizeof(*khdr) + ke->ke_hash_size;
-        rc = GSS_S_COMPLETE;
-out_err:
         rawobj_free(&cksum);
-        return rc;
+        return GSS_S_COMPLETE;
 }
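Note: a MIC token as produced by gss_get_mic_kerberos() above is just the
16-byte header followed by the (possibly truncated) checksum. A hypothetical
helper making the size assertions above concrete -- illustrative only:

        /* sketch: MIC token size = 16-byte krb5 header + truncated hash */
        static inline unsigned int demo_mic_token_size(struct krb5_enctype *ke)
        {
                return sizeof(struct krb5_header) + ke->ke_hash_size;
        }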
 static
 __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                               int msgcnt,
                               rawobj_t *msgs,
+                              int iovcnt,
+                              lnet_kiov_t *iovs,
                               rawobj_t *token)
 {
         struct krb5_ctx *kctx = gctx->internal_ctx_id;
         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
         struct krb5_header *khdr;
-        unsigned char acceptor_flag;
         rawobj_t cksum = RAWOBJ_EMPTY;
-        __u32 rc = GSS_S_FAILURE;
-
-        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
+        __u32 major;

         if (token->len < sizeof(*khdr)) {
                 CERROR("short signature: %u\n", token->len);
@@ -781,47 +827,34 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,

         khdr = (struct krb5_header *) token->data;

-        /* sanity checks */
-        if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_MIC_MSG) {
-                CERROR("bad token id\n");
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
-        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
-                CERROR("bad direction flag\n");
-                return GSS_S_BAD_SIG;
-        }
-        if (khdr->kh_filler != 0xff) {
-                CERROR("bad filler\n");
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
-        if (be16_to_cpu(khdr->kh_ec) != 0xffff ||
-            be16_to_cpu(khdr->kh_rrc) != 0xffff) {
-                CERROR("bad EC or RRC\n");
-                return GSS_S_DEFECTIVE_TOKEN;
+        major = verify_krb5_header(kctx, khdr, 0);
+        if (major != GSS_S_COMPLETE) {
+                CERROR("bad krb5 header\n");
+                return major;
         }

         if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                 CERROR("short signature: %u, require %d\n",
                        token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
-                goto out;
+                return GSS_S_FAILURE;
         }

         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
-                               khdr, msgcnt, msgs, &cksum))
+                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
+                CERROR("failed to make checksum\n");
                 return GSS_S_FAILURE;
+        }

         LASSERT(cksum.len >= ke->ke_hash_size);
         if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                    ke->ke_hash_size)) {
                 CERROR("checksum mismatch\n");
-                rc = GSS_S_BAD_SIG;
-                goto out;
+                rawobj_free(&cksum);
+                return GSS_S_BAD_SIG;
         }

-        rc = GSS_S_COMPLETE;
-out:
         rawobj_free(&cksum);
-        return rc;
+        return GSS_S_COMPLETE;
 }

 static
@@ -846,7 +879,7 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
 }

 static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
                          int mode_ecb,
                          int inobj_cnt,
                          rawobj_t *inobjs,
@@ -873,17 +906,17 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,

                 if (mode_ecb) {
                         if (enc)
-                                rc = ll_crypto_blkcipher_encrypt(
+                                rc = crypto_blkcipher_encrypt(
                                         &desc, &dst, &src, src.length);
                         else
-                                rc = ll_crypto_blkcipher_decrypt(
+                                rc = crypto_blkcipher_decrypt(
                                         &desc, &dst, &src, src.length);
                 } else {
                         if (enc)
-                                rc = ll_crypto_blkcipher_encrypt_iv(
+                                rc = crypto_blkcipher_encrypt_iv(
                                         &desc, &dst, &src, src.length);
                         else
-                                rc = ll_crypto_blkcipher_decrypt_iv(
+                                rc = crypto_blkcipher_decrypt_iv(
                                         &desc, &dst, &src, src.length);
                 }

@@ -900,8 +933,251 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
         RETURN(0);
 }
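Note: the bulk paths introduced below repeatedly round kiov lengths up to the
cipher blocksize with the (len + blocksize - 1) & ~(blocksize - 1) idiom. A
detached illustration -- hypothetical helper, power-of-two blocksize assumed:

        /* sketch: round len up to the next multiple of blocksize */
        static inline unsigned int demo_roundup(unsigned int len,
                                                unsigned int blocksize)
        {
                return (len + blocksize - 1) & ~(blocksize - 1);
        }
        /* blocksize 16: 1..16 -> 16, 17 -> 32, 0 -> 0 */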
+/*
+ * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
+ */
+static
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
+                      struct krb5_header *khdr,
+                      char *confounder,
+                      struct ptlrpc_bulk_desc *desc,
+                      rawobj_t *cipher,
+                      int adj_nob)
+{
+        struct blkcipher_desc ciph_desc;
+        __u8 local_iv[16] = {0};
+        struct scatterlist src, dst;
+        int blocksize, i, rc, nob = 0;
+
+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+        LASSERT(desc->bd_iov_count);
+        LASSERT(GET_ENC_KIOV(desc));
+
+        blocksize = crypto_blkcipher_blocksize(tfm);
+        LASSERT(blocksize > 1);
+        LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+        ciph_desc.tfm = tfm;
+        ciph_desc.info = local_iv;
+        ciph_desc.flags = 0;
+
+        /* encrypt confounder */
+        buf_to_sg(&src, confounder, blocksize);
+        buf_to_sg(&dst, cipher->data, blocksize);
+
+        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+        if (rc) {
+                CERROR("error to encrypt confounder: %d\n", rc);
+                return rc;
+        }
+
+        /* encrypt clear pages */
+        for (i = 0; i < desc->bd_iov_count; i++) {
+                sg_init_table(&src, 1);
+                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
+                            (BD_GET_KIOV(desc, i).kiov_len +
+                             blocksize - 1) &
+                            (~(blocksize - 1)),
+                            BD_GET_KIOV(desc, i).kiov_offset);
+                if (adj_nob)
+                        nob += src.length;
+                sg_init_table(&dst, 1);
+                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                            src.length, src.offset);
+
+                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
+                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
+
+                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+                                                 src.length);
+                if (rc) {
+                        CERROR("error to encrypt page: %d\n", rc);
+                        return rc;
+                }
+        }
+
+        /* encrypt krb5 header */
+        buf_to_sg(&src, khdr, sizeof(*khdr));
+        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+                                         sizeof(*khdr));
+        if (rc) {
+                CERROR("error to encrypt krb5 header: %d\n", rc);
+                return rc;
+        }
+
+        if (adj_nob)
+                desc->bd_nob = nob;
+
+        return 0;
+}
+
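Note: krb5_encrypt_bulk() above encrypts the confounder, every bulk page, and
finally the header copy with separate *_iv() calls, yet still forms one
unbroken CBC stream, because each crypto_blkcipher_encrypt_iv() call advances
the IV held in ciph_desc.info in place. A minimal detached sketch of that
chaining property -- illustrative only, buffers assumed blocksize-aligned:

        /* sketch: two encrypt_iv() calls continue a single CBC chain
         * because desc.info (the IV) is updated by each call */
        static int demo_cbc_chain(struct crypto_blkcipher *tfm,
                                  void *a, void *b, int len)
        {
                struct blkcipher_desc desc = { .tfm = tfm };
                __u8 iv[16] = { 0 };
                struct scatterlist sg;
                int rc;

                desc.info = iv;
                buf_to_sg(&sg, a, len);
                rc = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
                if (rc)
                        return rc;
                buf_to_sg(&sg, b, len);         /* chain resumes from iv */
                return crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
        }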
+/*
+ * desc->bd_nob_transferred is the size of cipher text received.
+ * desc->bd_nob is the target size of plain text supposed to be.
+ *
+ * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * plain text size.
+ * - for client read: we don't know the data size for each page, so
+ *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
+ *   be smaller, so we need to adjust it according to
+ *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
+ *   this means we DO NOT support the situation in which the server sends
+ *   an odd-size piece of data in a page which is not the last one.
+ * - for server write: we know exactly the data size for each page being
+ *   expected, thus kiov_len is accurate already, so we should not adjust
+ *   it at all. and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
+ *   round_up(bd_iov[]->kiov_len) which should have been done by
+ *   prep_bulk().
+ */
+static
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
+                      struct krb5_header *khdr,
+                      struct ptlrpc_bulk_desc *desc,
+                      rawobj_t *cipher,
+                      rawobj_t *plain,
+                      int adj_nob)
+{
+        struct blkcipher_desc ciph_desc;
+        __u8 local_iv[16] = {0};
+        struct scatterlist src, dst;
+        int ct_nob = 0, pt_nob = 0;
+        int blocksize, i, rc;
+
+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+        LASSERT(desc->bd_iov_count);
+        LASSERT(GET_ENC_KIOV(desc));
+        LASSERT(desc->bd_nob_transferred);
+
+        blocksize = crypto_blkcipher_blocksize(tfm);
+        LASSERT(blocksize > 1);
+        LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+        ciph_desc.tfm = tfm;
+        ciph_desc.info = local_iv;
+        ciph_desc.flags = 0;
+
+        if (desc->bd_nob_transferred % blocksize) {
+                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+                return -EPROTO;
+        }
+
+        /* decrypt head (confounder) */
+        buf_to_sg(&src, cipher->data, blocksize);
+        buf_to_sg(&dst, plain->data, blocksize);
+
+        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+        if (rc) {
+                CERROR("error to decrypt confounder: %d\n", rc);
+                return rc;
+        }
+
+        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+             i++) {
+                if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize != 0 ||
+                    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize != 0) {
+                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+                               i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
+                               BD_GET_ENC_KIOV(desc, i).kiov_len,
+                               blocksize);
+                        return -EFAULT;
+                }
+
+                if (adj_nob) {
+                        if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                            desc->bd_nob_transferred)
+                                BD_GET_ENC_KIOV(desc, i).kiov_len =
+                                        desc->bd_nob_transferred - ct_nob;
+
+                        BD_GET_KIOV(desc, i).kiov_len =
+                                BD_GET_ENC_KIOV(desc, i).kiov_len;
+                        if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                            desc->bd_nob)
+                                BD_GET_KIOV(desc, i).kiov_len =
+                                        desc->bd_nob - pt_nob;
+                } else {
+                        /* this should be guaranteed by LNET */
+                        LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len <=
+                                desc->bd_nob_transferred);
+                        LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
+                                BD_GET_ENC_KIOV(desc, i).kiov_len);
+                }
+
+                if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+                        continue;
+
+                sg_init_table(&src, 1);
+                sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                            BD_GET_ENC_KIOV(desc, i).kiov_len,
+                            BD_GET_ENC_KIOV(desc, i).kiov_offset);
+                dst = src;
+                if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+                        sg_assign_page(&dst,
+                                       BD_GET_KIOV(desc, i).kiov_page);
+
+                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+                                                 src.length);
+                if (rc) {
+                        CERROR("error to decrypt page: %d\n", rc);
+                        return rc;
+                }
+
+                if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
+                        memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
+                               BD_GET_KIOV(desc, i).kiov_offset,
+                               page_address(BD_GET_ENC_KIOV(desc, i).
+                                            kiov_page) +
+                               BD_GET_KIOV(desc, i).kiov_offset,
+                               BD_GET_KIOV(desc, i).kiov_len);
+                }
+
+                ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
+                pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+        }
+
+        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+                CERROR("%d cipher text transferred but only %d decrypted\n",
+                       desc->bd_nob_transferred, ct_nob);
+                return -EFAULT;
+        }
+
+        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+                CERROR("%d plain text expected but only %d received\n",
+                       desc->bd_nob, pt_nob);
+                return -EFAULT;
+        }
+
+        /* if needed, clear up the rest of the unused iovs */
+        if (adj_nob)
+                while (i < desc->bd_iov_count)
+                        BD_GET_KIOV(desc, i++).kiov_len = 0;
+
+        /* decrypt tail (krb5 header) */
+        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
+        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+                                         sizeof(*khdr));
+        if (rc) {
+                CERROR("error to decrypt tail: %d\n", rc);
+                return rc;
+        }
+
+        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+                CERROR("krb5 header doesn't match\n");
+                return -EACCES;
+        }
+
+        return 0;
+}
+
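Note: the adj_nob bookkeeping in krb5_decrypt_bulk() is easiest to see on
numbers. A detached sketch of the same length fixup, with plain arrays
standing in for bd_enc_vec[].kiov_len and bd_iov[].kiov_len (names
hypothetical, error checks omitted):

        /* sketch: client-read length adjustment from krb5_decrypt_bulk() */
        static void demo_adjust(int *enc_len, int *plain_len, int count,
                                int nob_transferred, int nob)
        {
                int ct_nob = 0, pt_nob = 0, i;

                for (i = 0; i < count && ct_nob < nob_transferred; i++) {
                        if (ct_nob + enc_len[i] > nob_transferred)
                                enc_len[i] = nob_transferred - ct_nob;
                        plain_len[i] = enc_len[i];
                        if (pt_nob + enc_len[i] > nob)
                                plain_len[i] = nob - pt_nob;
                        ct_nob += enc_len[i];
                        pt_nob += plain_len[i];
                }
                while (i < count)
                        plain_len[i++] = 0;     /* clear unused tail iovs */
        }
        /* e.g. 3 pages, blocksize 16, 8208 bytes of cipher for 8200 plain:
         * enc_len {4096, 4096, 16} -> plain_len {4096, 4096, 8} */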
 static
 __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
+                        rawobj_t *gsshdr,
                         rawobj_t *msg,
                         int msg_buflen,
                         rawobj_t *token)
@@ -909,36 +1185,32 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         struct krb5_ctx *kctx = gctx->internal_ctx_id;
         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
         struct krb5_header *khdr;
-        unsigned char acceptor_flag;
         int blocksize;
         rawobj_t cksum = RAWOBJ_EMPTY;
         rawobj_t data_desc[3], cipher;
         __u8 conf[GSS_MAX_CIPHER_BLOCK];
-        int enc_rc = 0;
+        int rc = 0;

         LASSERT(ke);
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
         LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                 ke->ke_conf_size >=
-                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

-        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
+        /*
+         * final token format:
+         * ---------------------------------------------------
+         * | krb5 header | cipher text | checksum (16 bytes) |
+         * ---------------------------------------------------
+         */

         /* fill krb5 header */
         LASSERT(token->len >= sizeof(*khdr));
         khdr = (struct krb5_header *) token->data;
-
-        khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
-        khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
-        khdr->kh_filler = 0xff;
-        khdr->kh_ec = cpu_to_be16(0);
-        khdr->kh_rrc = cpu_to_be16(0);
-        spin_lock(&krb5_seq_lock);
-        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        spin_unlock(&krb5_seq_lock);
+        fill_krb5_header(kctx, khdr, 1);

         /* generate confounder */
-        get_random_bytes(conf, ke->ke_conf_size);
+        cfs_get_random_bytes(conf, ke->ke_conf_size);
         /* get encryption blocksize. note kc_keye might not associated with
          * a tfm, currently only for arcfour-hmac */
@@ -947,7 +1219,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }

         LASSERT(blocksize <= ke->ke_conf_size);
@@ -956,7 +1228,26 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 return GSS_S_FAILURE;

         /*
-         * clear text layout, same for both checksum & encryption:
+         * clear text layout for checksum:
+         * ------------------------------------------------------
+         * | confounder | gss header | clear msgs | krb5 header |
+         * ------------------------------------------------------
+         */
+        data_desc[0].data = conf;
+        data_desc[0].len = ke->ke_conf_size;
+        data_desc[1].data = gsshdr->data;
+        data_desc[1].len = gsshdr->len;
+        data_desc[2].data = msg->data;
+        data_desc[2].len = msg->len;
+
+        /* compute checksum */
+        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                               khdr, 3, data_desc, 0, NULL, &cksum))
+                return GSS_S_FAILURE;
+        LASSERT(cksum.len >= ke->ke_hash_size);
+
+        /*
+         * clear text layout for encryption:
          * -----------------------------------------
          * | confounder | clear msgs | krb5 header |
          * -----------------------------------------
@@ -968,54 +1259,196 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         data_desc[2].data = (__u8 *) khdr;
         data_desc[2].len = sizeof(*khdr);

-        /* compute checksum */
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 3, data_desc, &cksum))
-                return GSS_S_FAILURE;
-        LASSERT(cksum.len >= ke->ke_hash_size);
-
-        /* encrypting, cipher text will be directly inplace */
+        /* cipher text will be directly inplace */
         cipher.data = (__u8 *) (khdr + 1);
         cipher.len = token->len - sizeof(*khdr);
         LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+                rawobj_t arc4_keye;
+                struct crypto_blkcipher *arc4_tfm;

-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, enc_rc = -EACCES);
-                }
+                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                        CERROR("failed to obtain arc4 enc key\n");
+                        GOTO(arc4_out, rc = -EACCES);
+                }

-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, enc_rc = -EACCES);
-                }
+                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+                if (IS_ERR(arc4_tfm)) {
+                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
+                        GOTO(arc4_out_key, rc = -EACCES);
+                }

-                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                             arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
-                        GOTO(arc4_out_tfm, enc_rc = -EACCES);
+                        GOTO(arc4_out_tfm, rc = -EACCES);
                 }

-                enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
-                                              3, data_desc, &cipher, 1);
+                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+                                          3, data_desc, &cipher, 1);
 arc4_out_tfm:
-                ll_crypto_free_blkcipher(arc4_tfm);
+                crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
                 do {} while(0); /* just to avoid compile warning */
         } else {
-                enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
-                                              3, data_desc, &cipher, 1);
+                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+                                          3, data_desc, &cipher, 1);
         }
-        if (enc_rc != 0) {
+        if (rc != 0) {
                 rawobj_free(&cksum);
                 return GSS_S_FAILURE;
         }
+
+        /* fill in checksum */
+        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
+        memcpy((char *)(khdr + 1) + cipher.len,
+               cksum.data + cksum.len - ke->ke_hash_size,
+               ke->ke_hash_size);
+        rawobj_free(&cksum);
+
+        /* final token length */
+        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
+        return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
+                             struct ptlrpc_bulk_desc *desc)
+{
+        struct krb5_ctx *kctx = gctx->internal_ctx_id;
+        int blocksize, i;
+
+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+        LASSERT(desc->bd_iov_count);
+        LASSERT(GET_ENC_KIOV(desc));
+        LASSERT(kctx->kc_keye.kb_tfm);
+
+        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+
+        for (i = 0; i < desc->bd_iov_count; i++) {
+                LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+                /*
+                 * offset should always start at a page boundary on
+                 * either the client or the server side.
+                 */
+                if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+                        CERROR("odd offset %d in page %d\n",
+                               BD_GET_KIOV(desc, i).kiov_offset, i);
+                        return GSS_S_FAILURE;
+                }
+
+                BD_GET_ENC_KIOV(desc, i).kiov_offset =
+                        BD_GET_KIOV(desc, i).kiov_offset;
+                BD_GET_ENC_KIOV(desc, i).kiov_len =
+                        (BD_GET_KIOV(desc, i).kiov_len +
+                         blocksize - 1) & (~(blocksize - 1));
+        }
+
+        return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
+                             struct ptlrpc_bulk_desc *desc,
+                             rawobj_t *token, int adj_nob)
+{
+        struct krb5_ctx *kctx = gctx->internal_ctx_id;
+        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+        struct krb5_header *khdr;
+        int blocksize;
+        rawobj_t cksum = RAWOBJ_EMPTY;
+        rawobj_t data_desc[1], cipher;
+        __u8 conf[GSS_MAX_CIPHER_BLOCK];
+        int rc = 0;
+
+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+        LASSERT(ke);
+        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
+
+        /*
+         * final token format:
+         * --------------------------------------------------
+         * | krb5 header | head/tail cipher text | checksum |
+         * --------------------------------------------------
+         */
+
+        /* fill krb5 header */
+        LASSERT(token->len >= sizeof(*khdr));
+        khdr = (struct krb5_header *) token->data;
+        fill_krb5_header(kctx, khdr, 1);
+
+        /* generate confounder */
+        cfs_get_random_bytes(conf, ke->ke_conf_size);
+
+        /* get encryption blocksize. note kc_keye might not be associated
+         * with a tfm, currently only for arcfour-hmac */
+        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+                LASSERT(kctx->kc_keye.kb_tfm == NULL);
+                blocksize = 1;
+        } else {
+                LASSERT(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+        }
+
+        /*
+         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
+         * the bulk token size would be exactly (sizeof(krb5_header) +
+         * blocksize + sizeof(krb5_header) + hashsize)
+         */
+        LASSERT(blocksize <= ke->ke_conf_size);
+        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+
+        /*
+         * clear text layout for checksum:
+         * ------------------------------------------
+         * | confounder | clear pages | krb5 header |
+         * ------------------------------------------
+         */
+        data_desc[0].data = conf;
+        data_desc[0].len = ke->ke_conf_size;
+
+        /* compute checksum */
+        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                               khdr, 1, data_desc,
+                               desc->bd_iov_count, GET_KIOV(desc),
+                               &cksum))
+                return GSS_S_FAILURE;
+        LASSERT(cksum.len >= ke->ke_hash_size);
+
+        /*
+         * clear text layout for encryption:
+         * ------------------------------------------
+         * | confounder | clear pages | krb5 header |
+         * ------------------------------------------
+         *        |              |             |
+         *        ----------  (cipher pages)   |
+         * result token:   |                   |
+         * -------------------------------------------
+         * | krb5 header | cipher text | cipher text |
+         * -------------------------------------------
+         */
+        data_desc[0].data = conf;
+        data_desc[0].len = ke->ke_conf_size;
+
+        cipher.data = (__u8 *) (khdr + 1);
+        cipher.len = blocksize + sizeof(*khdr);
+
+        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+                LBUG();
+                rc = 0;
+        } else {
+                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+                                       conf, desc, &cipher, adj_nob);
+        }
+
+        if (rc != 0) {
+                rawobj_free(&cksum);
+                return GSS_S_FAILURE;
+        }
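Note: together the assertions above pin the bulk token to a fixed shape:
header, one confounder block, an encrypted copy of the header, and the
checksum. A hypothetical size helper -- illustrative only; the literal 16 in
the LASSERT above is the checksum size this code reserves:

        /* sketch: bulk token size implied by gss_wrap_bulk_kerberos() */
        static inline unsigned int demo_bulk_token_size(int blocksize,
                                                        int hash_size)
        {
                return sizeof(struct krb5_header) + blocksize +
                       sizeof(struct krb5_header) + hash_size;
        }
        /* blocksize 16, hash_size 16: 16 + 16 + 16 + 16 = 64 bytes */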
@@ -1034,23 +1467,23 @@

 static
 __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
+                          rawobj_t *gsshdr,
                           rawobj_t *token,
                           rawobj_t *msg)
 {
         struct krb5_ctx *kctx = gctx->internal_ctx_id;
         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
         struct krb5_header *khdr;
-        unsigned char acceptor_flag;
         unsigned char *tmpbuf;
         int blocksize, bodysize;
         rawobj_t cksum = RAWOBJ_EMPTY;
         rawobj_t cipher_in, plain_out;
-        __u32 rc = GSS_S_FAILURE, enc_rc = 0;
+        rawobj_t hash_objs[3];
+        int rc = 0;
+        __u32 major;

         LASSERT(ke);

-        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
-
         if (token->len < sizeof(*khdr)) {
                 CERROR("short signature: %u\n", token->len);
                 return GSS_S_DEFECTIVE_TOKEN;
@@ -1058,27 +1491,10 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,

         khdr = (struct krb5_header *) token->data;

-        /* sanity check header */
-        if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_WRAP_MSG) {
-                CERROR("bad token id\n");
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
-        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
-                CERROR("bad direction flag\n");
-                return GSS_S_BAD_SIG;
-        }
-        if ((khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
-                CERROR("missing confidential flag\n");
-                return GSS_S_BAD_SIG;
-        }
-        if (khdr->kh_filler != 0xff) {
-                CERROR("bad filler\n");
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
-        if (be16_to_cpu(khdr->kh_ec) != 0x0 ||
-            be16_to_cpu(khdr->kh_rrc) != 0x0) {
-                CERROR("bad EC or RRC\n");
-                return GSS_S_DEFECTIVE_TOKEN;
+        major = verify_krb5_header(kctx, khdr, 1);
+        if (major != GSS_S_COMPLETE) {
+                CERROR("bad krb5 header\n");
+                return major;
         }

         /* block size */
@@ -1087,7 +1503,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }

         /* expected token layout:
@@ -1114,55 +1530,57 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
         }

         /* decrypting */
-        OBD_ALLOC(tmpbuf, bodysize);
+        OBD_ALLOC_LARGE(tmpbuf, bodysize);
         if (!tmpbuf)
                 return GSS_S_FAILURE;

+        major = GSS_S_FAILURE;
+
         cipher_in.data = (__u8 *) (khdr + 1);
         cipher_in.len = bodysize;
         plain_out.data = tmpbuf;
         plain_out.len = bodysize;

-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+                rawobj_t arc4_keye;
+                struct crypto_blkcipher *arc4_tfm;

-                cksum.data = token->data + token->len - ke->ke_hash_size;
-                cksum.len = ke->ke_hash_size;
+                cksum.data = token->data + token->len - ke->ke_hash_size;
+                cksum.len = ke->ke_hash_size;

-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, enc_rc = -EACCES);
-                }
+                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                        CERROR("failed to obtain arc4 enc key\n");
+                        GOTO(arc4_out, rc = -EACCES);
+                }

-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, enc_rc = -EACCES);
-                }
+                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+                if (IS_ERR(arc4_tfm)) {
+                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
+                        GOTO(arc4_out_key, rc = -EACCES);
+                }

-                if (ll_crypto_blkcipher_setkey(arc4_tfm,
+                if (crypto_blkcipher_setkey(arc4_tfm,
                                                arc4_keye.data, arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
-                        GOTO(arc4_out_tfm, enc_rc = -EACCES);
+                        GOTO(arc4_out_tfm, rc = -EACCES);
                 }

-                enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
-                                              1, &cipher_in, &plain_out, 0);
+                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+                                          1, &cipher_in, &plain_out, 0);
 arc4_out_tfm:
-                ll_crypto_free_blkcipher(arc4_tfm);
+                crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
                 cksum = RAWOBJ_EMPTY;
         } else {
-                enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
-                                              1, &cipher_in, &plain_out, 0);
+                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+                                          1, &cipher_in, &plain_out, 0);
         }
-        if (enc_rc != 0) {
+        if (rc != 0) {
                 CERROR("error decrypt\n");
                 goto out_free;
         }
@@ -1174,52 +1592,137 @@ arc4_out:
          * -----------------------------------------
          */

-        /* last part must be identical to the krb5 header */
+        /* verify krb5 header in token is not modified */
         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                    sizeof(*khdr))) {
-                CERROR("decrypted header mismatch\n");
+                CERROR("decrypted krb5 header mismatch\n");
                 goto out_free;
         }

-        /* verify checksum */
+        /* verify checksum, compose clear text as layout:
+         * ------------------------------------------------------
+         * | confounder | gss header | clear msgs | krb5 header |
+         * ------------------------------------------------------
+         */
+        hash_objs[0].len = ke->ke_conf_size;
+        hash_objs[0].data = plain_out.data;
+        hash_objs[1].len = gsshdr->len;
+        hash_objs[1].data = gsshdr->data;
+        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
+        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 1, &plain_out, &cksum))
+                               khdr, 3, hash_objs, 0, NULL, &cksum))
                 goto out_free;

         LASSERT(cksum.len >= ke->ke_hash_size);
         if (memcmp((char *)(khdr + 1) + bodysize,
                    cksum.data + cksum.len - ke->ke_hash_size,
                    ke->ke_hash_size)) {
-                CERROR("cksum mismatch\n");
+                CERROR("checksum mismatch\n");
                 goto out_free;
         }

         msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

-        rc = GSS_S_COMPLETE;
+        major = GSS_S_COMPLETE;
 out_free:
-        OBD_FREE(tmpbuf, bodysize);
+        OBD_FREE_LARGE(tmpbuf, bodysize);
         rawobj_free(&cksum);
-        return rc;
+        return major;
 }

 static
-__u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx,
-                                 int decrypt,
-                                 int length,
-                                 void *in_buf,
-                                 void *out_buf)
+__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
+                               struct ptlrpc_bulk_desc *desc,
+                               rawobj_t *token, int adj_nob)
 {
-        struct krb5_ctx *kctx = ctx->internal_ctx_id;
-        __u32 rc;
+        struct krb5_ctx *kctx = gctx->internal_ctx_id;
+        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+        struct krb5_header *khdr;
+        int blocksize;
+        rawobj_t cksum = RAWOBJ_EMPTY;
+        rawobj_t cipher, plain;
+        rawobj_t data_desc[1];
+        int rc;
+        __u32 major;

-        rc = krb5_encrypt(kctx->kc_keye.kb_tfm, decrypt,
-                          NULL, in_buf, out_buf, length);
+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+        LASSERT(ke);
+
+        if (token->len < sizeof(*khdr)) {
+                CERROR("short signature: %u\n", token->len);
+                return GSS_S_DEFECTIVE_TOKEN;
+        }
+
+        khdr = (struct krb5_header *) token->data;
+
+        major = verify_krb5_header(kctx, khdr, 1);
+        if (major != GSS_S_COMPLETE) {
+                CERROR("bad krb5 header\n");
+                return major;
+        }
+
+        /* block size */
+        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+                LASSERT(kctx->kc_keye.kb_tfm == NULL);
+                blocksize = 1;
+                LBUG();
+        } else {
+                LASSERT(kctx->kc_keye.kb_tfm);
+                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+        }
+        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+
+        /*
+         * token format is expected as:
+         * -----------------------------------------------
+         * | krb5 header | head/tail cipher text | cksum |
+         * -----------------------------------------------
+         */
+        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+            ke->ke_hash_size) {
+                CERROR("short token size: %u\n", token->len);
+                return GSS_S_DEFECTIVE_TOKEN;
+        }
+
+        cipher.data = (__u8 *) (khdr + 1);
+        cipher.len = blocksize + sizeof(*khdr);
+        plain.data = cipher.data;
+        plain.len = cipher.len;
+
+        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+                               desc, &cipher, &plain, adj_nob);
         if (rc)
-                CERROR("plain encrypt error: %d\n", rc);
+                return GSS_S_DEFECTIVE_TOKEN;

-        return rc;
+        /*
+         * verify checksum, compose clear text as layout:
+         * ------------------------------------------
+         * | confounder | clear pages | krb5 header |
+         * ------------------------------------------
+         */
+        data_desc[0].data = plain.data;
+        data_desc[0].len = blocksize;
+
+        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                               khdr, 1, data_desc,
+                               desc->bd_iov_count,
+                               GET_KIOV(desc),
+                               &cksum))
+                return GSS_S_FAILURE;
+        LASSERT(cksum.len >= ke->ke_hash_size);
+
+        if (memcmp(plain.data + blocksize + sizeof(*khdr),
+                   cksum.data + cksum.len - ke->ke_hash_size,
+                   ke->ke_hash_size)) {
+                CERROR("checksum mismatch\n");
+                rawobj_free(&cksum);
+                return GSS_S_BAD_SIG;
+        }
+
+        rawobj_free(&cksum);
+        return GSS_S_COMPLETE;
 }

 int gss_display_kerberos(struct gss_ctx *ctx,
@@ -1242,7 +1745,9 @@ static struct gss_api_ops gss_kerberos_ops = {
         .gss_verify_mic         = gss_verify_mic_kerberos,
         .gss_wrap               = gss_wrap_kerberos,
         .gss_unwrap             = gss_unwrap_kerberos,
-        .gss_plain_encrypt      = gss_plain_encrypt_kerberos,
+        .gss_prep_bulk          = gss_prep_bulk_kerberos,
+        .gss_wrap_bulk          = gss_wrap_bulk_kerberos,
+        .gss_unwrap_bulk        = gss_unwrap_bulk_kerberos,
         .gss_delete_sec_context = gss_delete_sec_context_kerberos,
         .gss_display            = gss_display_kerberos,
 };
@@ -1289,15 +1794,17 @@ static struct gss_api_mech gss_kerberos_mech = {

 int __init init_kerberos_module(void)
 {
-        int status;
+        int status;
+
+        spin_lock_init(&krb5_seq_lock);

-        status = lgss_mech_register(&gss_kerberos_mech);
-        if (status)
-                CERROR("Failed to register kerberos gss mechanism!\n");
-        return status;
+        status = lgss_mech_register(&gss_kerberos_mech);
+        if (status)
+                CERROR("Failed to register kerberos gss mechanism!\n");
+        return status;
 }

-void __exit cleanup_kerberos_module(void)
+void cleanup_kerberos_module(void)
 {
         lgss_mech_unregister(&gss_kerberos_mech);
 }