X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_krb5_mech.c;h=e0ddff9023639b3d19b021a7a9e824c55df8c4a7;hb=0a65279121a5a0f5c8831dd2ebd6927a235a94c2;hp=2e03843a1fa74c00fd53b25648e7554dc1d9aa0d;hpb=9b73c02192b3e16c322402e8c080e660ba2c457c;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c index 2e03843..e0ddff9 100644 --- a/lustre/ptlrpc/gss/gss_krb5_mech.c +++ b/lustre/ptlrpc/gss/gss_krb5_mech.c @@ -1,9 +1,10 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * Modifications for Lustre - * Copyright 2004 - 2006, Cluster File Systems, Inc. - * All rights reserved + * + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * + * Copyright (c) 2011, 2015, Intel Corporation. + * * Author: Eric Mei */ @@ -47,25 +48,17 @@ * */ -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif #define DEBUG_SUBSYSTEM S_SEC -#ifdef __KERNEL__ #include #include +#include #include #include -#include #include -#else -#include -#endif #include #include #include -#include #include #include #include @@ -75,8 +68,9 @@ #include "gss_api.h" #include "gss_asn1.h" #include "gss_krb5.h" +#include "gss_crypto.h" -spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(krb5_seq_lock); struct krb5_enctype { char *ke_dispname; @@ -94,264 +88,173 @@ struct krb5_enctype { * yet. this need to be fixed in the future. */ static struct krb5_enctype enctypes[] = { - [ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */ - "des-cbc-md5", - "des", - "md5", - CRYPTO_TFM_MODE_CBC, - 16, - 8, - 0, - }, - [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */ - "des3-hmac-sha1", - "des3_ede", - "sha1", - CRYPTO_TFM_MODE_CBC, - 20, - 8, - 1, - }, - [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */ - "aes128-cts-hmac-sha1-96", - "aes", - "sha1", - CRYPTO_TFM_MODE_CBC, - 12, - 16, - 1, - }, - [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */ - "aes256-cts-hmac-sha1-96", - "aes", - "sha1", - CRYPTO_TFM_MODE_CBC, - 12, - 16, - 1, - }, - [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */ - "arcfour-hmac-md5", - "arc4", - "md5", - CRYPTO_TFM_MODE_ECB, - 16, - 8, - 1, - }, + [ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */ + .ke_dispname = "des-cbc-md5", + .ke_enc_name = "cbc(des)", + .ke_hash_name = "md5", + .ke_hash_size = 16, + .ke_conf_size = 8, + }, +#ifdef HAVE_DES3_SUPPORT + [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */ + .ke_dispname = "des3-hmac-sha1", + .ke_enc_name = "cbc(des3_ede)", + .ke_hash_name = "sha1", + .ke_hash_size = 20, + .ke_conf_size = 8, + .ke_hash_hmac = 1, + }, +#endif + [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */ + .ke_dispname = "aes128-cts-hmac-sha1-96", + .ke_enc_name = "cbc(aes)", + .ke_hash_name = "sha1", + .ke_hash_size = 12, + .ke_conf_size = 16, + .ke_hash_hmac = 1, + }, + [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */ + .ke_dispname = "aes256-cts-hmac-sha1-96", + .ke_enc_name = "cbc(aes)", + .ke_hash_name = "sha1", + .ke_hash_size = 12, + .ke_conf_size = 16, + .ke_hash_hmac = 1, + }, + [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */ + .ke_dispname = "arcfour-hmac-md5", + .ke_enc_name = "ecb(arc4)", + .ke_hash_name = "md5", + .ke_hash_size = 16, + .ke_conf_size = 8, + .ke_hash_hmac = 1, + } }; -#define MAX_ENCTYPES sizeof(enctypes)/sizeof(struct krb5_enctype) - static const char * enctype2str(__u32 enctype) { - if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname) - return 
enctypes[enctype].ke_dispname; - - return "unknown"; -} - -static -int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode) -{ - kb->kb_tfm = crypto_alloc_tfm(alg_name, alg_mode); - if (kb->kb_tfm == NULL) { - CERROR("failed to alloc tfm: %s, mode %d\n", - alg_name, alg_mode); - return -1; - } + if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname) + return enctypes[enctype].ke_dispname; - if (crypto_cipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) { - CERROR("failed to set %s key, len %d\n", - alg_name, kb->kb_key.len); - return -1; - } - - return 0; + return "unknown"; } static int krb5_init_keys(struct krb5_ctx *kctx) { - struct krb5_enctype *ke; + struct krb5_enctype *ke; - if (kctx->kc_enctype >= MAX_ENCTYPES || - enctypes[kctx->kc_enctype].ke_hash_size == 0) { - CERROR("unsupported enctype %x\n", kctx->kc_enctype); - return -1; - } + if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) || + enctypes[kctx->kc_enctype].ke_hash_size == 0) { + CERROR("unsupported enctype %x\n", kctx->kc_enctype); + return -1; + } ke = &enctypes[kctx->kc_enctype]; - /* tfm arc4 is stateful, user should alloc-use-free by his own */ - if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC && - keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode)) - return -1; + /* tfm arc4 is stateful, user should alloc-use-free by his own */ + if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC && + gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode)) + return -1; - /* tfm hmac is stateful, user should alloc-use-free by his own */ - if (ke->ke_hash_hmac == 0 && - keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode)) - return -1; - if (ke->ke_hash_hmac == 0 && - keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode)) - return -1; + /* tfm hmac is stateful, user should alloc-use-free by his own */ + if (ke->ke_hash_hmac == 0 && + gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode)) + return -1; + if (ke->ke_hash_hmac == 0 && + gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode)) + return -1; return 0; } static -void keyblock_free(struct krb5_keyblock *kb) -{ - rawobj_free(&kb->kb_key); - if (kb->kb_tfm) - crypto_free_tfm(kb->kb_tfm); -} - -static -int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb) -{ - return rawobj_dup(&new->kb_key, &kb->kb_key); -} - -static -int get_bytes(char **ptr, const char *end, void *res, int len) -{ - char *p, *q; - p = *ptr; - q = p + len; - if (q > end || q < p) - return -1; - memcpy(res, p, len); - *ptr = q; - return 0; -} - -static -int get_rawobj(char **ptr, const char *end, rawobj_t *res) -{ - char *p, *q; - __u32 len; - - p = *ptr; - if (get_bytes(&p, end, &len, sizeof(len))) - return -1; - - q = p + len; - if (q > end || q < p) - return -1; - - OBD_ALLOC(res->data, len); - if (!res->data) - return -1; - - res->len = len; - memcpy(res->data, p, len); - *ptr = q; - return 0; -} - -static -int get_keyblock(char **ptr, const char *end, - struct krb5_keyblock *kb, __u32 keysize) -{ - char *buf; - - OBD_ALLOC(buf, keysize); - if (buf == NULL) - return -1; - - if (get_bytes(ptr, end, buf, keysize)) { - OBD_FREE(buf, keysize); - return -1; - } - - kb->kb_key.len = keysize; - kb->kb_key.data = buf; - return 0; -} - -static void delete_context_kerberos(struct krb5_ctx *kctx) { - rawobj_free(&kctx->kc_mech_used); + rawobj_free(&kctx->kc_mech_used); - keyblock_free(&kctx->kc_keye); - keyblock_free(&kctx->kc_keyi); - keyblock_free(&kctx->kc_keyc); + gss_keyblock_free(&kctx->kc_keye); + 
gss_keyblock_free(&kctx->kc_keyi); + gss_keyblock_free(&kctx->kc_keyc); } static __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end) { - unsigned int tmp_uint, keysize; - - /* seed_init flag */ - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) - goto out_err; - kctx->kc_seed_init = (tmp_uint != 0); - - /* seed */ - if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed))) - goto out_err; - - /* sign/seal algorithm, not really used now */ - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) || - get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) - goto out_err; - - /* end time */ - if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime))) - goto out_err; - - /* seq send */ - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) - goto out_err; - kctx->kc_seq_send = tmp_uint; - - /* mech oid */ - if (get_rawobj(&p, end, &kctx->kc_mech_used)) - goto out_err; - - /* old style enc/seq keys in format: - * - enctype (u32) - * - keysize (u32) - * - keydata - * we decompose them to fit into the new context - */ - - /* enc key */ - if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype))) - goto out_err; - - if (get_bytes(&p, end, &keysize, sizeof(keysize))) - goto out_err; - - if (get_keyblock(&p, end, &kctx->kc_keye, keysize)) - goto out_err; - - /* seq key */ - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) || - tmp_uint != kctx->kc_enctype) - goto out_err; - - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) || - tmp_uint != keysize) - goto out_err; - - if (get_keyblock(&p, end, &kctx->kc_keyc, keysize)) - goto out_err; - - /* old style fallback */ - if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc)) - goto out_err; - - if (p != end) - goto out_err; - - CDEBUG(D_SEC, "succesfully imported rfc1964 context\n"); - return 0; + unsigned int tmp_uint, keysize; + + /* seed_init flag */ + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) + goto out_err; + kctx->kc_seed_init = (tmp_uint != 0); + + /* seed */ + if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed))) + goto out_err; + + /* sign/seal algorithm, not really used now */ + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) || + gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) + goto out_err; + + /* end time. While kc_endtime might be 64 bit the krb5 API + * still uses 32 bits. To delay the 2038 bug see the incoming + * value as a u32 which give us until 2106. 
See the link for details: + * + * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html + */ + if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32))) + goto out_err; + + /* seq send */ + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) + goto out_err; + kctx->kc_seq_send = tmp_uint; + + /* mech oid */ + if (gss_get_rawobj(&p, end, &kctx->kc_mech_used)) + goto out_err; + + /* old style enc/seq keys in format: + * - enctype (u32) + * - keysize (u32) + * - keydata + * we decompose them to fit into the new context + */ + + /* enc key */ + if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype))) + goto out_err; + + if (gss_get_bytes(&p, end, &keysize, sizeof(keysize))) + goto out_err; + + if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize)) + goto out_err; + + /* seq key */ + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) || + tmp_uint != kctx->kc_enctype) + goto out_err; + + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) || + tmp_uint != keysize) + goto out_err; + + if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize)) + goto out_err; + + /* old style fallback */ + if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc)) + goto out_err; + + if (p != end) + goto out_err; + + CDEBUG(D_SEC, "successfully imported rfc1964 context\n"); + return 0; out_err: - return GSS_S_FAILURE; + return GSS_S_FAILURE; } /* Flags for version 2 context flags */ @@ -362,58 +265,64 @@ out_err: static __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end) { - unsigned int tmp_uint, keysize; - - /* end time */ - if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime))) - goto out_err; - - /* flags */ - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) - goto out_err; - - if (tmp_uint & KRB5_CTX_FLAG_INITIATOR) - kctx->kc_initiate = 1; - if (tmp_uint & KRB5_CTX_FLAG_CFX) - kctx->kc_cfx = 1; - if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) - kctx->kc_have_acceptor_subkey = 1; - - /* seq send */ - if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send))) - goto out_err; - - /* enctype */ - if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype))) - goto out_err; - - /* size of each key */ - if (get_bytes(&p, end, &keysize, sizeof(keysize))) - goto out_err; - - /* number of keys - should always be 3 */ - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) - goto out_err; - - if (tmp_uint != 3) { - CERROR("Invalid number of keys: %u\n", tmp_uint); - goto out_err; - } - - /* ke */ - if (get_keyblock(&p, end, &kctx->kc_keye, keysize)) - goto out_err; - /* ki */ - if (get_keyblock(&p, end, &kctx->kc_keyi, keysize)) - goto out_err; - /* ki */ - if (get_keyblock(&p, end, &kctx->kc_keyc, keysize)) - goto out_err; - - CDEBUG(D_SEC, "succesfully imported v2 context\n"); - return 0; + unsigned int tmp_uint, keysize; + + /* end time. While kc_endtime might be 64 bit the krb5 API + * still uses 32 bits. To delay the 2038 bug see the incoming + * value as a u32 which give us until 2106. 
See the link for details: + * + * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html + */ + if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32))) + goto out_err; + + /* flags */ + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) + goto out_err; + + if (tmp_uint & KRB5_CTX_FLAG_INITIATOR) + kctx->kc_initiate = 1; + if (tmp_uint & KRB5_CTX_FLAG_CFX) + kctx->kc_cfx = 1; + if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) + kctx->kc_have_acceptor_subkey = 1; + + /* seq send */ + if (gss_get_bytes(&p, end, &kctx->kc_seq_send, + sizeof(kctx->kc_seq_send))) + goto out_err; + + /* enctype */ + if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype))) + goto out_err; + + /* size of each key */ + if (gss_get_bytes(&p, end, &keysize, sizeof(keysize))) + goto out_err; + + /* number of keys - should always be 3 */ + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) + goto out_err; + + if (tmp_uint != 3) { + CERROR("Invalid number of keys: %u\n", tmp_uint); + goto out_err; + } + + /* ke */ + if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize)) + goto out_err; + /* ki */ + if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize)) + goto out_err; + /* ki */ + if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize)) + goto out_err; + + CDEBUG(D_SEC, "successfully imported v2 context\n"); + return 0; out_err: - return GSS_S_FAILURE; + return GSS_S_FAILURE; } /* @@ -425,15 +334,15 @@ static __u32 gss_import_sec_context_kerberos(rawobj_t *inbuf, struct gss_ctx *gctx) { - struct krb5_ctx *kctx; - char *p = (char *) inbuf->data; - char *end = (char *) (inbuf->data + inbuf->len); - unsigned int tmp_uint, rc; + struct krb5_ctx *kctx; + char *p = (char *)inbuf->data; + char *end = (char *)(inbuf->data + inbuf->len); + unsigned int tmp_uint, rc; - if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) { - CERROR("Fail to read version\n"); - return GSS_S_FAILURE; - } + if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) { + CERROR("Fail to read version\n"); + return GSS_S_FAILURE; + } /* only support 0, 1 for the moment */ if (tmp_uint > 2) { @@ -491,17 +400,17 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx, if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used)) goto out_err; - if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye)) - goto out_err; - if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi)) - goto out_err; - if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc)) - goto out_err; + if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye)) + goto out_err; + if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi)) + goto out_err; + if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc)) + goto out_err; if (krb5_init_keys(knew)) goto out_err; gctx_new->internal_ctx_id = knew; - CDEBUG(D_SEC, "succesfully copied reverse context\n"); + CDEBUG(D_SEC, "successfully copied reverse context\n"); return GSS_S_COMPLETE; out_err: @@ -512,11 +421,11 @@ out_err: static __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx, - unsigned long *endtime) + time64_t *endtime) { struct krb5_ctx *kctx = gctx->internal_ctx_id; - *endtime = (unsigned long) ((__u32) kctx->kc_endtime); + *endtime = kctx->kc_endtime; return GSS_S_COMPLETE; } @@ -529,225 +438,123 @@ void gss_delete_sec_context_kerberos(void *internal_ctx) OBD_FREE_PTR(kctx); } -static -void buf_to_sg(struct scatterlist *sg, char *ptr, int len) -{ - sg->page = virt_to_page(ptr); - sg->offset = offset_in_page(ptr); - sg->length = len; -} - -static -__u32 krb5_encrypt(struct crypto_tfm *tfm, - int decrypt, - void * iv, - 
void * in, - void * out, - int length) -{ - struct scatterlist sg; - __u8 local_iv[16] = {0}; - __u32 ret = -EINVAL; - - LASSERT(tfm); - - if (length % crypto_tfm_alg_blocksize(tfm) != 0) { - CERROR("output length %d mismatch blocksize %d\n", - length, crypto_tfm_alg_blocksize(tfm)); - goto out; - } - - if (crypto_tfm_alg_ivsize(tfm) > 16) { - CERROR("iv size too large %d\n", crypto_tfm_alg_ivsize(tfm)); - goto out; - } - - if (iv) - memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm)); - - memcpy(out, in, length); - buf_to_sg(&sg, out, length); - - if (decrypt) - ret = crypto_cipher_decrypt_iv(tfm, &sg, &sg, length, local_iv); - else - ret = crypto_cipher_encrypt_iv(tfm, &sg, &sg, length, local_iv); - -out: - return(ret); -} - -static inline -int krb5_digest_hmac(struct crypto_tfm *tfm, - rawobj_t *key, - struct krb5_header *khdr, - int msgcnt, rawobj_t *msgs, - rawobj_t *cksum) -{ - struct scatterlist sg[1]; - __u32 keylen = key->len, i; - - crypto_hmac_init(tfm, key->data, &keylen); - - for (i = 0; i < msgcnt; i++) { - if (msgs[i].len == 0) - continue; - buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len); - crypto_hmac_update(tfm, sg, 1); - } - - if (khdr) { - buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); - crypto_hmac_update(tfm, sg, 1); - } - - crypto_hmac_final(tfm, key->data, &keylen, cksum->data); - return 0; -} - -static inline -int krb5_digest_norm(struct crypto_tfm *tfm, - struct krb5_keyblock *kb, - struct krb5_header *khdr, - int msgcnt, rawobj_t *msgs, - rawobj_t *cksum) -{ - struct scatterlist sg[1]; - int i; - - LASSERT(kb->kb_tfm); - - crypto_digest_init(tfm); - - for (i = 0; i < msgcnt; i++) { - if (msgs[i].len == 0) - continue; - buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len); - crypto_digest_update(tfm, sg, 1); - } - - if (khdr) { - buf_to_sg(sg, (char *) khdr, sizeof(*khdr)); - crypto_digest_update(tfm, sg, 1); - } - - crypto_digest_final(tfm, cksum->data); - - return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data, - cksum->data, cksum->len); -} - /* * compute (keyed/keyless) checksum against the plain text which appended * with krb5 wire token header. 
*/ static __s32 krb5_make_checksum(__u32 enctype, - struct krb5_keyblock *kb, - struct krb5_header *khdr, - int msgcnt, rawobj_t *msgs, - rawobj_t *cksum) + struct gss_keyblock *kb, + struct krb5_header *khdr, + int msgcnt, rawobj_t *msgs, + int iovcnt, struct bio_vec *iovs, + rawobj_t *cksum, + digest_hash hash_func) { - struct krb5_enctype *ke = &enctypes[enctype]; - struct crypto_tfm *tfm; - __u32 code = GSS_S_FAILURE; - int rc; - - if (!(tfm = crypto_alloc_tfm(ke->ke_hash_name, 0))) { - CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name); - return GSS_S_FAILURE; - } - - cksum->len = crypto_tfm_alg_digestsize(tfm); - OBD_ALLOC(cksum->data, cksum->len); - if (!cksum->data) { - cksum->len = 0; - goto out_tfm; - } - - if (ke->ke_hash_hmac) - rc = krb5_digest_hmac(tfm, &kb->kb_key, - khdr, msgcnt, msgs, cksum); - else - rc = krb5_digest_norm(tfm, kb, - khdr, msgcnt, msgs, cksum); - - if (rc == 0) - code = GSS_S_COMPLETE; -out_tfm: - crypto_free_tfm(tfm); - return code; + struct krb5_enctype *ke = &enctypes[enctype]; + struct ahash_request *req = NULL; + enum cfs_crypto_hash_alg hash_algo; + rawobj_t hdr; + int rc; + + hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name); + + /* For the cbc(des) case we want md5 instead of hmac(md5) */ + if (strcmp(ke->ke_enc_name, "cbc(des)")) + req = cfs_crypto_hash_init(hash_algo, kb->kb_key.data, + kb->kb_key.len); + else + req = cfs_crypto_hash_init(hash_algo, NULL, 0); + if (IS_ERR(req)) { + rc = PTR_ERR(req); + CERROR("failed to alloc hash %s : rc = %d\n", + ke->ke_hash_name, rc); + goto out_no_hash; + } + + cksum->len = cfs_crypto_hash_digestsize(hash_algo); + OBD_ALLOC_LARGE(cksum->data, cksum->len); + if (!cksum->data) { + cksum->len = 0; + rc = -ENOMEM; + goto out_free_hash; + } + + hdr.data = (__u8 *)khdr; + hdr.len = sizeof(*khdr); + + if (!hash_func) { + rc = -EPROTO; + CERROR("hash function for %s undefined\n", + ke->ke_hash_name); + goto out_free_hash; + } + rc = hash_func(req, &hdr, msgcnt, msgs, iovcnt, iovs); + if (rc) + goto out_free_hash; + + if (!ke->ke_hash_hmac) { + LASSERT(kb->kb_tfm); + + cfs_crypto_hash_final(req, cksum->data, &cksum->len); + rc = gss_crypt_generic(kb->kb_tfm, 0, NULL, + cksum->data, cksum->data, + cksum->len); + goto out_no_hash; + } + +out_free_hash: + if (req) + cfs_crypto_hash_final(req, cksum->data, &cksum->len); +out_no_hash: + return rc ? GSS_S_FAILURE : GSS_S_COMPLETE; } -static -__u32 gss_get_mic_kerberos(struct gss_ctx *gctx, - int msgcnt, - rawobj_t *msgs, - rawobj_t *token) +static void fill_krb5_header(struct krb5_ctx *kctx, + struct krb5_header *khdr, + int privacy) { - struct krb5_ctx *kctx = gctx->internal_ctx_id; - struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; - struct krb5_header *khdr; - unsigned char acceptor_flag; - rawobj_t cksum = RAWOBJ_EMPTY; - __u32 rc = GSS_S_FAILURE; + unsigned char acceptor_flag; acceptor_flag = kctx->kc_initiate ? 
0 : FLAG_SENDER_IS_ACCEPTOR; - /* fill krb5 header */ - LASSERT(token->len >= sizeof(*khdr)); - khdr = (struct krb5_header *) token->data; + if (privacy) { + khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG); + khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL; + khdr->kh_ec = cpu_to_be16(0); + khdr->kh_rrc = cpu_to_be16(0); + } else { + khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG); + khdr->kh_flags = acceptor_flag; + khdr->kh_ec = cpu_to_be16(0xffff); + khdr->kh_rrc = cpu_to_be16(0xffff); + } - khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG); - khdr->kh_flags = acceptor_flag; khdr->kh_filler = 0xff; - khdr->kh_ec = cpu_to_be16(0xffff); - khdr->kh_rrc = cpu_to_be16(0xffff); - spin_lock(&krb5_seq_lock); - khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); - spin_unlock(&krb5_seq_lock); - - /* checksum */ - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, - khdr, msgcnt, msgs, &cksum)) - goto out_err; - - LASSERT(cksum.len >= ke->ke_hash_size); - LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size); - memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size, - ke->ke_hash_size); - - token->len = sizeof(*khdr) + ke->ke_hash_size; - rc = GSS_S_COMPLETE; -out_err: - rawobj_free(&cksum); - return rc; + spin_lock(&krb5_seq_lock); + khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); + spin_unlock(&krb5_seq_lock); } -static -__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx, - int msgcnt, - rawobj_t *msgs, - rawobj_t *token) +static __u32 verify_krb5_header(struct krb5_ctx *kctx, + struct krb5_header *khdr, + int privacy) { - struct krb5_ctx *kctx = gctx->internal_ctx_id; - struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; - struct krb5_header *khdr; - unsigned char acceptor_flag; - rawobj_t cksum = RAWOBJ_EMPTY; - __u32 rc = GSS_S_FAILURE; + unsigned char acceptor_flag; + __u16 tok_id, ec_rrc; acceptor_flag = kctx->kc_initiate ? 
FLAG_SENDER_IS_ACCEPTOR : 0; - if (token->len < sizeof(*khdr)) { - CERROR("short signature: %u\n", token->len); - return GSS_S_DEFECTIVE_TOKEN; + if (privacy) { + tok_id = KG_TOK_WRAP_MSG; + ec_rrc = 0x0; + } else { + tok_id = KG_TOK_MIC_MSG; + ec_rrc = 0xffff; } - khdr = (struct krb5_header *) token->data; - /* sanity checks */ - if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_MIC_MSG) { + if (be16_to_cpu(khdr->kh_tok_id) != tok_id) { CERROR("bad token id\n"); return GSS_S_DEFECTIVE_TOKEN; } @@ -755,432 +562,966 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx, CERROR("bad direction flag\n"); return GSS_S_BAD_SIG; } + if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) { + CERROR("missing confidential flag\n"); + return GSS_S_BAD_SIG; + } if (khdr->kh_filler != 0xff) { CERROR("bad filler\n"); return GSS_S_DEFECTIVE_TOKEN; } - if (be16_to_cpu(khdr->kh_ec) != 0xffff || - be16_to_cpu(khdr->kh_rrc) != 0xffff) { + if (be16_to_cpu(khdr->kh_ec) != ec_rrc || + be16_to_cpu(khdr->kh_rrc) != ec_rrc) { CERROR("bad EC or RRC\n"); return GSS_S_DEFECTIVE_TOKEN; } + return GSS_S_COMPLETE; +} - if (token->len < sizeof(*khdr) + ke->ke_hash_size) { - CERROR("short signature: %u, require %d\n", - token->len, (int) sizeof(*khdr) + ke->ke_hash_size); - goto out; - } - - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, - khdr, msgcnt, msgs, &cksum)) - return GSS_S_FAILURE; - - LASSERT(cksum.len >= ke->ke_hash_size); - if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size, - ke->ke_hash_size)) { - CERROR("checksum mismatch\n"); - rc = GSS_S_BAD_SIG; - goto out; - } +static +__u32 gss_get_mic_kerberos(struct gss_ctx *gctx, + int msgcnt, + rawobj_t *msgs, + int iovcnt, + struct bio_vec *iovs, + rawobj_t *token) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + rawobj_t cksum = RAWOBJ_EMPTY; + u32 major; + + /* fill krb5 header */ + LASSERT(token->len >= sizeof(*khdr)); + khdr = (struct krb5_header *)token->data; + fill_krb5_header(kctx, khdr, 0); + + /* checksum */ + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr, + msgcnt, msgs, iovcnt, iovs, &cksum, + gctx->hash_func)) + GOTO(out_free_cksum, major = GSS_S_FAILURE); + + LASSERT(cksum.len >= ke->ke_hash_size); + LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size); + memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size); + + token->len = sizeof(*khdr) + ke->ke_hash_size; + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); + return major; +} - rc = GSS_S_COMPLETE; +static +__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx, + int msgcnt, + rawobj_t *msgs, + int iovcnt, + struct bio_vec *iovs, + rawobj_t *token) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + rawobj_t cksum = RAWOBJ_EMPTY; + u32 major; + + if (token->len < sizeof(*khdr)) { + CERROR("short signature: %u\n", token->len); + return GSS_S_DEFECTIVE_TOKEN; + } + + khdr = (struct krb5_header *)token->data; + + major = verify_krb5_header(kctx, khdr, 0); + if (major != GSS_S_COMPLETE) { + CERROR("bad krb5 header\n"); + goto out; + } + + if (token->len < sizeof(*khdr) + ke->ke_hash_size) { + CERROR("short signature: %u, require %d\n", + token->len, (int) sizeof(*khdr) + ke->ke_hash_size); + GOTO(out, major = GSS_S_FAILURE); + } + + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, + khdr, msgcnt, msgs, iovcnt, iovs, &cksum, + 
gctx->hash_func)) + GOTO(out_free_cksum, major = GSS_S_FAILURE); + + LASSERT(cksum.len >= ke->ke_hash_size); + if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size)) { + CERROR("checksum mismatch\n"); + GOTO(out_free_cksum, major = GSS_S_BAD_SIG); + } + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); out: - rawobj_free(&cksum); - return rc; + return major; } +/* + * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size. + */ static -int add_padding(rawobj_t *msg, int msg_buflen, int blocksize) +int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm, + struct krb5_header *khdr, + char *confounder, + struct ptlrpc_bulk_desc *desc, + rawobj_t *cipher, + int adj_nob) { - int padding; - - padding = (blocksize - (msg->len & (blocksize - 1))) & - (blocksize - 1); - if (!padding) - return 0; + __u8 local_iv[16] = {0}; + struct scatterlist src, dst; + struct sg_table sg_src, sg_dst; + int blocksize, i, rc, nob = 0; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); + + LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_enc_vec); + + blocksize = crypto_sync_skcipher_blocksize(tfm); + LASSERT(blocksize > 1); + LASSERT(cipher->len == blocksize + sizeof(*khdr)); + + /* encrypt confounder */ + rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize); + if (rc != 0) + return rc; + + rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize); + if (rc != 0) { + gss_teardown_sgtable(&sg_src); + return rc; + } + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + blocksize, local_iv); + + rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize); + + gss_teardown_sgtable(&sg_dst); + gss_teardown_sgtable(&sg_src); + + if (rc) { + CERROR("error to encrypt confounder: %d\n", rc); + skcipher_request_zero(req); + return rc; + } + + /* encrypt clear pages */ + for (i = 0; i < desc->bd_iov_count; i++) { + sg_init_table(&src, 1); + sg_set_page(&src, desc->bd_vec[i].bv_page, + (desc->bd_vec[i].bv_len + + blocksize - 1) & + (~(blocksize - 1)), + desc->bd_vec[i].bv_offset); + if (adj_nob) + nob += src.length; + sg_init_table(&dst, 1); + sg_set_page(&dst, desc->bd_enc_vec[i].bv_page, + src.length, src.offset); + + desc->bd_enc_vec[i].bv_offset = dst.offset; + desc->bd_enc_vec[i].bv_len = dst.length; + + skcipher_request_set_crypt(req, &src, &dst, + src.length, local_iv); + rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length); + if (rc) { + CERROR("error to encrypt page: %d\n", rc); + skcipher_request_zero(req); + return rc; + } + } + + /* encrypt krb5 header */ + rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr)); + if (rc != 0) { + skcipher_request_zero(req); + return rc; + } + + rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize, + sizeof(*khdr)); + if (rc != 0) { + gss_teardown_sgtable(&sg_src); + skcipher_request_zero(req); + return rc; + } + + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + sizeof(*khdr), local_iv); + rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, + sizeof(*khdr)); + skcipher_request_zero(req); + + gss_teardown_sgtable(&sg_dst); + gss_teardown_sgtable(&sg_src); - if (msg->len + padding > msg_buflen) { - CERROR("bufsize %u too small: datalen %u, padding %u\n", - msg_buflen, msg->len, padding); - return -EINVAL; + if (rc) { + CERROR("error to encrypt krb5 header: %d\n", rc); + return rc; } - memset(msg->data + msg->len, padding, padding); - msg->len += padding; + if 
(adj_nob) + desc->bd_nob = nob; + return 0; } +/* + * desc->bd_nob_transferred is the size of cipher text received. + * desc->bd_nob is the target size of plain text supposed to be. + * + * if adj_nob != 0, we adjust each page's bv_len to the actual + * plain text size. + * - for client read: we don't know data size for each page, so + * bd_iov[]->bv_len is set to PAGE_SIZE, but actual data received might + * be smaller, so we need to adjust it according to + * bd_u.bd_kiov.bd_enc_vec[]->bv_len. + * this means we DO NOT support the situation that server send an odd size + * data in a page which is not the last one. + * - for server write: we knows exactly data size for each page being expected, + * thus bv_len is accurate already, so we should not adjust it at all. + * and bd_u.bd_kiov.bd_enc_vec[]->bv_len should be + * round_up(bd_iov[]->bv_len) which + * should have been done by prep_bulk(). + */ static -int krb5_encrypt_rawobjs(struct crypto_tfm *tfm, - int mode_ecb, - int inobj_cnt, - rawobj_t *inobjs, - rawobj_t *outobj, - int enc) +int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm, + struct krb5_header *khdr, + struct ptlrpc_bulk_desc *desc, + rawobj_t *cipher, + rawobj_t *plain, + int adj_nob) { - struct scatterlist src, dst; - __u8 local_iv[16] = {0}, *buf; - __u32 datalen = 0; - int i, rc; - ENTRY; - - buf = outobj->data; - - for (i = 0; i < inobj_cnt; i++) { - LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len); - - buf_to_sg(&src, inobjs[i].data, inobjs[i].len); - buf_to_sg(&dst, buf, outobj->len - datalen); - - if (mode_ecb) { - if (enc) - rc = crypto_cipher_encrypt( - tfm, &dst, &src, src.length); - else - rc = crypto_cipher_decrypt( - tfm, &dst, &src, src.length); - } else { - if (enc) - rc = crypto_cipher_encrypt_iv( - tfm, &dst, &src, src.length, local_iv); - else - rc = crypto_cipher_decrypt_iv( - tfm, &dst, &src, src.length, local_iv); - } - - if (rc) { - CERROR("encrypt error %d\n", rc); - RETURN(rc); - } - - datalen += inobjs[i].len; - buf += inobjs[i].len; - } - - outobj->len = datalen; - RETURN(0); + __u8 local_iv[16] = {0}; + struct scatterlist src, dst; + struct sg_table sg_src, sg_dst; + int ct_nob = 0, pt_nob = 0; + int blocksize, i, rc; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); + + LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_enc_vec); + LASSERT(desc->bd_nob_transferred); + + blocksize = crypto_sync_skcipher_blocksize(tfm); + LASSERT(blocksize > 1); + LASSERT(cipher->len == blocksize + sizeof(*khdr)); + + if (desc->bd_nob_transferred % blocksize) { + CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred); + return -EPROTO; + } + + /* decrypt head (confounder) */ + rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize); + if (rc != 0) + return rc; + + rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize); + if (rc != 0) { + gss_teardown_sgtable(&sg_src); + return rc; + } + + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + blocksize, local_iv); + + rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize); + + gss_teardown_sgtable(&sg_dst); + gss_teardown_sgtable(&sg_src); + + if (rc) { + CERROR("error to decrypt confounder: %d\n", rc); + skcipher_request_zero(req); + return rc; + } + + for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred; + i++) { + if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 || + desc->bd_enc_vec[i].bv_len % blocksize != 0) { + CERROR("page %d: odd offset %u len 
%u, blocksize %d\n", + i, desc->bd_enc_vec[i].bv_offset, + desc->bd_enc_vec[i].bv_len, + blocksize); + skcipher_request_zero(req); + return -EFAULT; + } + + if (adj_nob) { + if (ct_nob + desc->bd_enc_vec[i].bv_len > + desc->bd_nob_transferred) + desc->bd_enc_vec[i].bv_len = + desc->bd_nob_transferred - ct_nob; + + desc->bd_vec[i].bv_len = + desc->bd_enc_vec[i].bv_len; + if (pt_nob + desc->bd_enc_vec[i].bv_len > + desc->bd_nob) + desc->bd_vec[i].bv_len = + desc->bd_nob - pt_nob; + } else { + /* this should be guaranteed by LNET */ + LASSERT(ct_nob + desc->bd_enc_vec[i]. + bv_len <= + desc->bd_nob_transferred); + LASSERT(desc->bd_vec[i].bv_len <= + desc->bd_enc_vec[i].bv_len); + } + + if (desc->bd_enc_vec[i].bv_len == 0) + continue; + + sg_init_table(&src, 1); + sg_set_page(&src, desc->bd_enc_vec[i].bv_page, + desc->bd_enc_vec[i].bv_len, + desc->bd_enc_vec[i].bv_offset); + dst = src; + if (desc->bd_vec[i].bv_len % blocksize == 0) + sg_assign_page(&dst, + desc->bd_vec[i].bv_page); + + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + src.length, local_iv); + rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length); + if (rc) { + CERROR("error to decrypt page: %d\n", rc); + skcipher_request_zero(req); + return rc; + } + + if (desc->bd_vec[i].bv_len % blocksize != 0) { + memcpy(page_address(desc->bd_vec[i].bv_page) + + desc->bd_vec[i].bv_offset, + page_address(desc->bd_enc_vec[i]. + bv_page) + + desc->bd_vec[i].bv_offset, + desc->bd_vec[i].bv_len); + } + + ct_nob += desc->bd_enc_vec[i].bv_len; + pt_nob += desc->bd_vec[i].bv_len; + } + + if (unlikely(ct_nob != desc->bd_nob_transferred)) { + CERROR("%d cipher text transferred but only %d decrypted\n", + desc->bd_nob_transferred, ct_nob); + skcipher_request_zero(req); + return -EFAULT; + } + + if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) { + CERROR("%d plain text expected but only %d received\n", + desc->bd_nob, pt_nob); + skcipher_request_zero(req); + return -EFAULT; + } + + /* if needed, clear up the rest unused iovs */ + if (adj_nob) + while (i < desc->bd_iov_count) + desc->bd_vec[i++].bv_len = 0; + + /* decrypt tail (krb5 header) */ + rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize, + sizeof(*khdr)); + if (rc != 0) + return rc; + + rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize, + sizeof(*khdr)); + if (rc != 0) { + gss_teardown_sgtable(&sg_src); + return rc; + } + + skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl, + src.length, local_iv); + rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl, + sizeof(*khdr)); + gss_teardown_sgtable(&sg_src); + gss_teardown_sgtable(&sg_dst); + + skcipher_request_zero(req); + if (rc) { + CERROR("error to decrypt tail: %d\n", rc); + return rc; + } + + if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) { + CERROR("krb5 header doesn't match\n"); + return -EACCES; + } + + return 0; } static __u32 gss_wrap_kerberos(struct gss_ctx *gctx, - rawobj_t *msg, - int msg_buflen, - rawobj_t *token) + rawobj_t *gsshdr, + rawobj_t *msg, + int msg_buflen, + rawobj_t *token) { - struct krb5_ctx *kctx = gctx->internal_ctx_id; - struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; - struct krb5_header *khdr; - unsigned char acceptor_flag; - int blocksize; - rawobj_t cksum = RAWOBJ_EMPTY; - rawobj_t data_desc[3], cipher; - __u8 conf[GSS_MAX_CIPHER_BLOCK]; - int enc_rc = 0; - - LASSERT(ke); - LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); - LASSERT(kctx->kc_keye.kb_tfm == NULL || - ke->ke_conf_size >= - crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm)); - - 
acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR; - - /* fill krb5 header */ - LASSERT(token->len >= sizeof(*khdr)); - khdr = (struct krb5_header *) token->data; - - khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG); - khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL; - khdr->kh_filler = 0xff; - khdr->kh_ec = cpu_to_be16(0); - khdr->kh_rrc = cpu_to_be16(0); - spin_lock(&krb5_seq_lock); - khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++); - spin_unlock(&krb5_seq_lock); - - /* generate confounder */ - get_random_bytes(conf, ke->ke_conf_size); - - /* get encryption blocksize. note kc_keye might not associated with - * a tfm, currently only for arcfour-hmac - */ - if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - LASSERT(kctx->kc_keye.kb_tfm == NULL); - blocksize = 1; - } else { - LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm); - } - LASSERT(blocksize <= ke->ke_conf_size); - - /* padding the message */ - if (add_padding(msg, msg_buflen, blocksize)) - return GSS_S_FAILURE; - - /* - * clear text layout, same for both checksum & encryption: - * ----------------------------------------- - * | confounder | clear msgs | krb5 header | - * ----------------------------------------- - */ - data_desc[0].data = conf; - data_desc[0].len = ke->ke_conf_size; - data_desc[1].data = msg->data; - data_desc[1].len = msg->len; - data_desc[2].data = (__u8 *) khdr; - data_desc[2].len = sizeof(*khdr); - - /* compute checksum */ - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, - khdr, 3, data_desc, &cksum)) - return GSS_S_FAILURE; - LASSERT(cksum.len >= ke->ke_hash_size); - - /* encrypting, cipher text will be directly inplace */ - cipher.data = (__u8 *) (khdr + 1); - cipher.len = token->len - sizeof(*khdr); - LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr)); - - if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - rawobj_t arc4_keye; - struct crypto_tfm *arc4_tfm; - - if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, - NULL, 1, &cksum, &arc4_keye)) { - CERROR("failed to obtain arc4 enc key\n"); - GOTO(arc4_out, enc_rc = -EACCES); - } - - arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB); - if (arc4_tfm == NULL) { - CERROR("failed to alloc tfm arc4 in ECB mode\n"); - GOTO(arc4_out_key, enc_rc = -EACCES); - } - - if (crypto_cipher_setkey(arc4_tfm, - arc4_keye.data, arc4_keye.len)) { - CERROR("failed to set arc4 key, len %d\n", - arc4_keye.len); - GOTO(arc4_out_tfm, enc_rc = -EACCES); - } - - enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1, - 3, data_desc, &cipher, 1); + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + int blocksize; + rawobj_t cksum = RAWOBJ_EMPTY; + rawobj_t data_desc[3], cipher; + __u8 conf[GSS_MAX_CIPHER_BLOCK]; + __u8 local_iv[16] = {0}; + u32 major; + int rc = 0; + + LASSERT(ke); + LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); + LASSERT(kctx->kc_keye.kb_tfm == NULL || + ke->ke_conf_size >= + crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm)); + + /* + * final token format: + * --------------------------------------------------- + * | krb5 header | cipher text | checksum (16 bytes) | + * --------------------------------------------------- + */ + + /* fill krb5 header */ + LASSERT(token->len >= sizeof(*khdr)); + khdr = (struct krb5_header *)token->data; + fill_krb5_header(kctx, khdr, 1); + + /* generate confounder */ + get_random_bytes(conf, ke->ke_conf_size); + + /* get encryption blocksize. 
note kc_keye might not associated with + * a tfm, currently only for arcfour-hmac */ + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LASSERT(kctx->kc_keye.kb_tfm == NULL); + blocksize = 1; + } else { + LASSERT(kctx->kc_keye.kb_tfm); + blocksize = crypto_sync_skcipher_blocksize( + kctx->kc_keye.kb_tfm); + } + LASSERT(blocksize <= ke->ke_conf_size); + + /* padding the message */ + if (gss_add_padding(msg, msg_buflen, blocksize)) + return GSS_S_FAILURE; + + /* + * clear text layout for checksum: + * ------------------------------------------------------ + * | confounder | gss header | clear msgs | krb5 header | + * ------------------------------------------------------ + */ + data_desc[0].data = conf; + data_desc[0].len = ke->ke_conf_size; + data_desc[1].data = gsshdr->data; + data_desc[1].len = gsshdr->len; + data_desc[2].data = msg->data; + data_desc[2].len = msg->len; + + /* compute checksum */ + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 3, data_desc, 0, NULL, &cksum, + gctx->hash_func)) + GOTO(out_free_cksum, major = GSS_S_FAILURE); + LASSERT(cksum.len >= ke->ke_hash_size); + + /* + * clear text layout for encryption: + * ----------------------------------------- + * | confounder | clear msgs | krb5 header | + * ----------------------------------------- + */ + data_desc[0].data = conf; + data_desc[0].len = ke->ke_conf_size; + data_desc[1].data = msg->data; + data_desc[1].len = msg->len; + data_desc[2].data = (__u8 *) khdr; + data_desc[2].len = sizeof(*khdr); + + /* cipher text will be directly inplace */ + cipher.data = (__u8 *)(khdr + 1); + cipher.len = token->len - sizeof(*khdr); + LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr)); + + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + rawobj_t arc4_keye = RAWOBJ_EMPTY; + struct crypto_sync_skcipher *arc4_tfm; + + if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, + NULL, 1, &cksum, 0, NULL, &arc4_keye, + gctx->hash_func)) { + CERROR("failed to obtain arc4 enc key\n"); + GOTO(arc4_out_key, rc = -EACCES); + } + + arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); + if (IS_ERR(arc4_tfm)) { + CERROR("failed to alloc tfm arc4 in ECB mode\n"); + GOTO(arc4_out_key, rc = -EACCES); + } + + if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data, + arc4_keye.len)) { + CERROR("failed to set arc4 key, len %d\n", + arc4_keye.len); + GOTO(arc4_out_tfm, rc = -EACCES); + } + + rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc, + &cipher, 1); arc4_out_tfm: - crypto_free_tfm(arc4_tfm); + crypto_free_sync_skcipher(arc4_tfm); arc4_out_key: - rawobj_free(&arc4_keye); -arc4_out: - do {} while(0); /* just to avoid compile warning */ - } else { - enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0, - 3, data_desc, &cipher, 1); - } - - if (enc_rc != 0) { - rawobj_free(&cksum); - return GSS_S_FAILURE; - } + rawobj_free(&arc4_keye); + } else { + rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3, + data_desc, &cipher, 1); + } + + if (rc) + GOTO(out_free_cksum, major = GSS_S_FAILURE); + + /* fill in checksum */ + LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size); + memcpy((char *)(khdr + 1) + cipher.len, + cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size); + + /* final token length */ + token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); + return major; +} - /* fill in checksum */ - LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size); - memcpy((char *)(khdr + 1) + 
cipher.len, - cksum.data + cksum.len - ke->ke_hash_size, - ke->ke_hash_size); - rawobj_free(&cksum); +static +__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx, + struct ptlrpc_bulk_desc *desc) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + int blocksize, i; + + LASSERT(desc->bd_iov_count); + LASSERT(desc->bd_enc_vec); + LASSERT(kctx->kc_keye.kb_tfm); + + blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); + + for (i = 0; i < desc->bd_iov_count; i++) { + LASSERT(desc->bd_enc_vec[i].bv_page); + /* + * offset should always start at page boundary of either + * client or server side. + */ + if (desc->bd_vec[i].bv_offset & blocksize) { + CERROR("odd offset %d in page %d\n", + desc->bd_vec[i].bv_offset, i); + return GSS_S_FAILURE; + } + + desc->bd_enc_vec[i].bv_offset = + desc->bd_vec[i].bv_offset; + desc->bd_enc_vec[i].bv_len = + (desc->bd_vec[i].bv_len + + blocksize - 1) & (~(blocksize - 1)); + } + + return GSS_S_COMPLETE; +} - /* final token length */ - token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; - return GSS_S_COMPLETE; +static +__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, + struct ptlrpc_bulk_desc *desc, + rawobj_t *token, int adj_nob) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + int blocksz; + rawobj_t cksum = RAWOBJ_EMPTY; + rawobj_t data_desc[1], cipher; + __u8 conf[GSS_MAX_CIPHER_BLOCK]; + int rc = 0; + u32 major; + + LASSERT(ke); + LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK); + + /* + * final token format: + * -------------------------------------------------- + * | krb5 header | head/tail cipher text | checksum | + * -------------------------------------------------- + */ + + /* fill krb5 header */ + LASSERT(token->len >= sizeof(*khdr)); + khdr = (struct krb5_header *)token->data; + fill_krb5_header(kctx, khdr, 1); + + /* generate confounder */ + get_random_bytes(conf, ke->ke_conf_size); + + /* get encryption blocksize. note kc_keye might not associated with + * a tfm, currently only for arcfour-hmac */ + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LASSERT(kctx->kc_keye.kb_tfm == NULL); + blocksz = 1; + } else { + LASSERT(kctx->kc_keye.kb_tfm); + blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); + } + + /* + * we assume the size of krb5_header (16 bytes) must be n * blocksize. 
+ * the bulk token size would be exactly (sizeof(krb5_header) + + * blocksize + sizeof(krb5_header) + hashsize) + */ + LASSERT(blocksz <= ke->ke_conf_size); + LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0); + LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16); + + /* + * clear text layout for checksum: + * ------------------------------------------ + * | confounder | clear pages | krb5 header | + * ------------------------------------------ + */ + data_desc[0].data = conf; + data_desc[0].len = ke->ke_conf_size; + + /* compute checksum */ + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 1, data_desc, + desc->bd_iov_count, desc->bd_vec, + &cksum, gctx->hash_func)) + GOTO(out_free_cksum, major = GSS_S_FAILURE); + LASSERT(cksum.len >= ke->ke_hash_size); + + /* + * clear text layout for encryption: + * ------------------------------------------ + * | confounder | clear pages | krb5 header | + * ------------------------------------------ + * | | | + * ---------- (cipher pages) | + * result token: | | + * ------------------------------------------- + * | krb5 header | cipher text | cipher text | + * ------------------------------------------- + */ + data_desc[0].data = conf; + data_desc[0].len = ke->ke_conf_size; + + cipher.data = (__u8 *)(khdr + 1); + cipher.len = blocksz + sizeof(*khdr); + + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LBUG(); + rc = 0; + } else { + rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr, + conf, desc, &cipher, adj_nob); + } + if (rc) + GOTO(out_free_cksum, major = GSS_S_FAILURE); + + /* fill in checksum */ + LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size); + memcpy((char *)(khdr + 1) + cipher.len, + cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size); + + /* final token length */ + token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); + return major; } static __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, - rawobj_t *token, - rawobj_t *msg) + rawobj_t *gsshdr, + rawobj_t *token, + rawobj_t *msg) { - struct krb5_ctx *kctx = gctx->internal_ctx_id; - struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; - struct krb5_header *khdr; - unsigned char acceptor_flag; - unsigned char *tmpbuf; - int blocksize, bodysize; - rawobj_t cksum = RAWOBJ_EMPTY; - rawobj_t cipher_in, plain_out; - __u32 rc = GSS_S_FAILURE, enc_rc = 0; - - LASSERT(ke); - - acceptor_flag = kctx->kc_initiate ? 
FLAG_SENDER_IS_ACCEPTOR : 0; - - if (token->len < sizeof(*khdr)) { - CERROR("short signature: %u\n", token->len); - return GSS_S_DEFECTIVE_TOKEN; - } - - khdr = (struct krb5_header *) token->data; - - /* sanity check header */ - if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_WRAP_MSG) { - CERROR("bad token id\n"); - return GSS_S_DEFECTIVE_TOKEN; - } - if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) { - CERROR("bad direction flag\n"); - return GSS_S_BAD_SIG; - } - if ((khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) { - CERROR("missing confidential flag\n"); - return GSS_S_BAD_SIG; - } - if (khdr->kh_filler != 0xff) { - CERROR("bad filler\n"); - return GSS_S_DEFECTIVE_TOKEN; - } - if (be16_to_cpu(khdr->kh_ec) != 0x0 || - be16_to_cpu(khdr->kh_rrc) != 0x0) { - CERROR("bad EC or RRC\n"); - return GSS_S_DEFECTIVE_TOKEN; - } - - /* block size */ - if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - LASSERT(kctx->kc_keye.kb_tfm == NULL); - blocksize = 1; - } else { - LASSERT(kctx->kc_keye.kb_tfm); - blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm); - } - - /* expected token layout: - * ---------------------------------------- - * | krb5 header | cipher text | checksum | - * ---------------------------------------- - */ - bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size; - - if (bodysize % blocksize) { - CERROR("odd bodysize %d\n", bodysize); - return GSS_S_DEFECTIVE_TOKEN; - } - - if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) { - CERROR("incomplete token: bodysize %d\n", bodysize); - return GSS_S_DEFECTIVE_TOKEN; - } - - if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) { - CERROR("buffer too small: %u, require %d\n", - msg->len, bodysize - ke->ke_conf_size); - return GSS_S_FAILURE; - } - - /* decrypting */ - OBD_ALLOC(tmpbuf, bodysize); - if (!tmpbuf) - return GSS_S_FAILURE; - - cipher_in.data = (__u8 *) (khdr + 1); - cipher_in.len = bodysize; - plain_out.data = tmpbuf; - plain_out.len = bodysize; - - if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - rawobj_t arc4_keye; - struct crypto_tfm *arc4_tfm; - - cksum.data = token->data + token->len - ke->ke_hash_size; - cksum.len = ke->ke_hash_size; - - if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, - NULL, 1, &cksum, &arc4_keye)) { - CERROR("failed to obtain arc4 enc key\n"); - GOTO(arc4_out, enc_rc = -EACCES); - } - - arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB); - if (arc4_tfm == NULL) { - CERROR("failed to alloc tfm arc4 in ECB mode\n"); - GOTO(arc4_out_key, enc_rc = -EACCES); - } - - if (crypto_cipher_setkey(arc4_tfm, - arc4_keye.data, arc4_keye.len)) { - CERROR("failed to set arc4 key, len %d\n", - arc4_keye.len); - GOTO(arc4_out_tfm, enc_rc = -EACCES); - } - - enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1, - 1, &cipher_in, &plain_out, 0); + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + unsigned char *tmpbuf; + int blocksz, bodysize; + rawobj_t cksum = RAWOBJ_EMPTY; + rawobj_t cipher_in, plain_out; + rawobj_t hash_objs[3]; + int rc = 0; + __u32 major; + __u8 local_iv[16] = {0}; + + LASSERT(ke); + + if (token->len < sizeof(*khdr)) { + CERROR("short signature: %u\n", token->len); + return GSS_S_DEFECTIVE_TOKEN; + } + + khdr = (struct krb5_header *)token->data; + + major = verify_krb5_header(kctx, khdr, 1); + if (major != GSS_S_COMPLETE) { + CERROR("bad krb5 header\n"); + return major; + } + + /* block size */ + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LASSERT(kctx->kc_keye.kb_tfm == NULL); 
+ blocksz = 1; + } else { + LASSERT(kctx->kc_keye.kb_tfm); + blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); + } + + /* expected token layout: + * ---------------------------------------- + * | krb5 header | cipher text | checksum | + * ---------------------------------------- + */ + bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size; + + if (bodysize % blocksz) { + CERROR("odd bodysize %d\n", bodysize); + return GSS_S_DEFECTIVE_TOKEN; + } + + if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) { + CERROR("incomplete token: bodysize %d\n", bodysize); + return GSS_S_DEFECTIVE_TOKEN; + } + + if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) { + CERROR("buffer too small: %u, require %d\n", + msg->len, bodysize - ke->ke_conf_size); + return GSS_S_FAILURE; + } + + /* decrypting */ + OBD_ALLOC_LARGE(tmpbuf, bodysize); + if (!tmpbuf) + return GSS_S_FAILURE; + + major = GSS_S_FAILURE; + + cipher_in.data = (__u8 *)(khdr + 1); + cipher_in.len = bodysize; + plain_out.data = tmpbuf; + plain_out.len = bodysize; + + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + rawobj_t arc4_keye; + struct crypto_sync_skcipher *arc4_tfm; + + cksum.data = token->data + token->len - ke->ke_hash_size; + cksum.len = ke->ke_hash_size; + + if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, + NULL, 1, &cksum, 0, NULL, &arc4_keye, + gctx->hash_func)) { + CERROR("failed to obtain arc4 enc key\n"); + GOTO(arc4_out, rc = -EACCES); + } + + arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); + if (IS_ERR(arc4_tfm)) { + CERROR("failed to alloc tfm arc4 in ECB mode\n"); + GOTO(arc4_out_key, rc = -EACCES); + } + + if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data, + arc4_keye.len)) { + CERROR("failed to set arc4 key, len %d\n", + arc4_keye.len); + GOTO(arc4_out_tfm, rc = -EACCES); + } + + rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in, + &plain_out, 0); arc4_out_tfm: - crypto_free_tfm(arc4_tfm); + crypto_free_sync_skcipher(arc4_tfm); arc4_out_key: - rawobj_free(&arc4_keye); + rawobj_free(&arc4_keye); arc4_out: - cksum = RAWOBJ_EMPTY; - } else { - enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0, - 1, &cipher_in, &plain_out, 0); - } - - if (enc_rc != 0) { - CERROR("error decrypt\n"); - goto out_free; - } - LASSERT(plain_out.len == bodysize); - - /* expected clear text layout: - * ----------------------------------------- - * | confounder | clear msgs | krb5 header | - * ----------------------------------------- - */ - - /* last part must be identical to the krb5 header */ - if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr), - sizeof(*khdr))) { - CERROR("decrypted header mismatch\n"); - goto out_free; - } - - /* verify checksum */ - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, - khdr, 1, &plain_out, &cksum)) - goto out_free; - - LASSERT(cksum.len >= ke->ke_hash_size); - if (memcmp((char *)(khdr + 1) + bodysize, - cksum.data + cksum.len - ke->ke_hash_size, - ke->ke_hash_size)) { - CERROR("cksum mismatch\n"); - goto out_free; - } - - msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr); - memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len); - - rc = GSS_S_COMPLETE; + cksum = RAWOBJ_EMPTY; + } else { + rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1, + &cipher_in, &plain_out, 0); + } + + if (rc != 0) { + CERROR("error decrypt\n"); + goto out_free; + } + LASSERT(plain_out.len == bodysize); + + /* expected clear text layout: + * ----------------------------------------- + * | confounder | clear msgs | krb5 header | + * 
----------------------------------------- + */ + + /* verify krb5 header in token is not modified */ + if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr), + sizeof(*khdr))) { + CERROR("decrypted krb5 header mismatch\n"); + goto out_free; + } + + /* verify checksum, compose clear text as layout: + * ------------------------------------------------------ + * | confounder | gss header | clear msgs | krb5 header | + * ------------------------------------------------------ + */ + hash_objs[0].len = ke->ke_conf_size; + hash_objs[0].data = plain_out.data; + hash_objs[1].len = gsshdr->len; + hash_objs[1].data = gsshdr->data; + hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr); + hash_objs[2].data = plain_out.data + ke->ke_conf_size; + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 3, hash_objs, 0, NULL, &cksum, + gctx->hash_func)) + goto out_free; + + LASSERT(cksum.len >= ke->ke_hash_size); + if (memcmp((char *)(khdr + 1) + bodysize, + cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size)) { + CERROR("checksum mismatch\n"); + goto out_free; + } + + msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr); + memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len); + + major = GSS_S_COMPLETE; out_free: - OBD_FREE(tmpbuf, bodysize); - rawobj_free(&cksum); - return rc; + OBD_FREE_LARGE(tmpbuf, bodysize); + rawobj_free(&cksum); + return major; } static -__u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx, - int length, - void *in_buf, - void *out_buf) +__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx, + struct ptlrpc_bulk_desc *desc, + rawobj_t *token, int adj_nob) { - struct krb5_ctx *kctx = ctx->internal_ctx_id; - __u32 rc; - - rc = krb5_encrypt(kctx->kc_keye.kb_tfm, 0, - NULL, in_buf, out_buf, length); - if (rc) - CERROR("plain encrypt error: %d\n", rc); - - return rc; + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + int blocksz; + rawobj_t cksum = RAWOBJ_EMPTY; + rawobj_t cipher, plain; + rawobj_t data_desc[1]; + int rc; + __u32 major; + + LASSERT(ke); + + if (token->len < sizeof(*khdr)) { + CERROR("short signature: %u\n", token->len); + return GSS_S_DEFECTIVE_TOKEN; + } + + khdr = (struct krb5_header *)token->data; + + major = verify_krb5_header(kctx, khdr, 1); + if (major != GSS_S_COMPLETE) { + CERROR("bad krb5 header\n"); + return major; + } + + /* block size */ + if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { + LASSERT(kctx->kc_keye.kb_tfm == NULL); + blocksz = 1; + LBUG(); + } else { + LASSERT(kctx->kc_keye.kb_tfm); + blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm); + } + LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0); + + /* + * token format is expected as: + * ----------------------------------------------- + * | krb5 header | head/tail cipher text | cksum | + * ----------------------------------------------- + */ + if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) + + ke->ke_hash_size) { + CERROR("short token size: %u\n", token->len); + return GSS_S_DEFECTIVE_TOKEN; + } + + cipher.data = (__u8 *) (khdr + 1); + cipher.len = blocksz + sizeof(*khdr); + plain.data = cipher.data; + plain.len = cipher.len; + + rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr, + desc, &cipher, &plain, adj_nob); + if (rc) + return GSS_S_DEFECTIVE_TOKEN; + + /* + * verify checksum, compose clear text as layout: + * ------------------------------------------ + * | confounder | clear pages | krb5 header | + * 
------------------------------------------ + */ + data_desc[0].data = plain.data; + data_desc[0].len = blocksz; + + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, + khdr, 1, data_desc, + desc->bd_iov_count, + desc->bd_vec, + &cksum, gctx->hash_func)) + return GSS_S_FAILURE; + LASSERT(cksum.len >= ke->ke_hash_size); + + if (memcmp(plain.data + blocksz + sizeof(*khdr), + cksum.data + cksum.len - ke->ke_hash_size, + ke->ke_hash_size)) { + CERROR("checksum mismatch\n"); + rawobj_free(&cksum); + return GSS_S_BAD_SIG; + } + + rawobj_free(&cksum); + return GSS_S_COMPLETE; } int gss_display_kerberos(struct gss_ctx *ctx, @@ -1190,7 +1531,7 @@ int gss_display_kerberos(struct gss_ctx *ctx, struct krb5_ctx *kctx = ctx->internal_ctx_id; int written; - written = snprintf(buf, bufsize, "mech: krb5 (%s)\n", + written = snprintf(buf, bufsize, "krb5 (%s)", enctype2str(kctx->kc_enctype)); return written; } @@ -1203,7 +1544,9 @@ static struct gss_api_ops gss_kerberos_ops = { .gss_verify_mic = gss_verify_mic_kerberos, .gss_wrap = gss_wrap_kerberos, .gss_unwrap = gss_unwrap_kerberos, - .gss_plain_encrypt = gss_plain_encrypt_kerberos, + .gss_prep_bulk = gss_prep_bulk_kerberos, + .gss_wrap_bulk = gss_wrap_bulk_kerberos, + .gss_unwrap_bulk = gss_unwrap_bulk_kerberos, .gss_delete_sec_context = gss_delete_sec_context_kerberos, .gss_display = gss_display_kerberos, }; @@ -1235,11 +1578,8 @@ static struct subflavor_desc gss_kerberos_sfs[] = { }, }; -/* - * currently we leave module owner NULL - */ static struct gss_api_mech gss_kerberos_mech = { - .gm_owner = NULL, /*THIS_MODULE, */ + /* .gm_owner uses default NULL value for THIS_MODULE */ .gm_name = "krb5", .gm_oid = (rawobj_t) {9, "\052\206\110\206\367\022\001\002\002"}, @@ -1250,15 +1590,15 @@ static struct gss_api_mech gss_kerberos_mech = { int __init init_kerberos_module(void) { - int status; + int status; - status = lgss_mech_register(&gss_kerberos_mech); - if (status) - CERROR("Failed to register kerberos gss mechanism!\n"); - return status; + status = lgss_mech_register(&gss_kerberos_mech); + if (status) + CERROR("Failed to register kerberos gss mechanism!\n"); + return status; } -void __exit cleanup_kerberos_module(void) +void cleanup_kerberos_module(void) { lgss_mech_unregister(&gss_kerberos_mech); }
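Some notes on the mechanisms this patch touches follow, each with a small standalone C sketch. These are illustrative userspace programs written for this review, not code from the patch.

The rewritten import_context_rfc1964()/import_context_rfc4121() parse the context blob through the gss_get_bytes()/gss_get_rawobj()/gss_get_keyblock() helpers from the new gss_crypto.h, replacing the static get_bytes()/get_rawobj()/get_keyblock() removed above. The shared pattern is a cursor bounded by `end`, so a truncated or hostile blob fails cleanly instead of overrunning. A minimal sketch of that pattern; get_blob() is a hypothetical stand-in for gss_get_rawobj(), not the Lustre function:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* advance *ptr by len, failing cleanly on a truncated buffer */
static int get_bytes(char **ptr, const char *end, void *res, size_t len)
{
        char *p = *ptr;
        char *q = p + len;

        if (q > end || q < p)           /* past the end, or pointer wrap */
                return -1;
        memcpy(res, p, len);
        *ptr = q;
        return 0;
}

/* u32 length prefix + payload, the layout used for the mech oid */
static int get_blob(char **ptr, const char *end, void **data, uint32_t *len)
{
        if (get_bytes(ptr, end, len, sizeof(*len)))
                return -1;
        if (*len > (size_t)(end - *ptr))
                return -1;
        *data = malloc(*len);
        if (*data == NULL)
                return -1;
        memcpy(*data, *ptr, *len);
        *ptr += *len;
        return 0;
}

int main(void)
{
        char buf[] = { 4, 0, 0, 0, 'o', 'i', 'd', '!' }; /* LE length */
        char *p = buf;
        void *data;
        uint32_t len;

        if (get_blob(&p, buf + sizeof(buf), &data, &len) == 0) {
                printf("read %u bytes, consumed all: %d\n",
                       (unsigned int)len, p == buf + sizeof(buf));
                free(data);
        }
        return 0;
}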
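Both import paths now read the end time with sizeof(u32) even though kc_endtime is wider, per the in-code comment citing the MIT y2038 notes. A sketch of why treating the wire value as an unsigned 32-bit quantity moves the rollover from 2038 to 2106; it assumes the destination starts zeroed, which the real context allocation provides:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char wire[4] = { 0xff, 0xff, 0xff, 0xff }; /* max u32 */
        uint32_t raw;
        int64_t endtime = 0;    /* zeroed, like a fresh krb5_ctx */

        memcpy(&raw, wire, sizeof(uint32_t));   /* exactly sizeof(u32) */
        endtime = raw;          /* unsigned source: zero-extended */

        /* 4294967295 seconds past the epoch is February 2106 */
        printf("endtime = %" PRId64 "\n", endtime);
        return 0;
}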
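krb5_make_checksum() now drives the libcfs ahash interface through the caller-supplied hash_func, but the output handling is unchanged: the digest over (confounder | gss header | clear msgs | krb5 header) is truncated to ke_hash_size bytes taken from the tail, i.e. cksum.data + cksum.len - ke->ke_hash_size. A flattened userspace approximation of the keyed (ke_hash_hmac) path, using OpenSSL's long-standing one-shot HMAC() (deprecated in OpenSSL 3.0 but still available; link with -lcrypto) in place of the kernel machinery; the key and message bytes are made up for the demo:

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char key[16] = "0123456789abcdef";
        unsigned char blob[64];         /* conf | gss hdr | msg | khdr */
        unsigned char digest[EVP_MAX_MD_SIZE];
        unsigned int dlen = 0;
        const unsigned int hash_size = 12; /* hmac-sha1-96 truncation */
        unsigned int i;

        memset(blob, 0xab, sizeof(blob));
        HMAC(EVP_sha1(), key, sizeof(key), blob, sizeof(blob),
             digest, &dlen);

        /* keep the tail, as the patch does when filling the token */
        for (i = dlen - hash_size; i < dlen; i++)
                printf("%02x", digest[i]);
        printf("\n");
        return 0;
}

For the keyless enctypes (ke_hash_hmac == 0) the patch instead computes a plain digest and encrypts it with kb_tfm via gss_crypt_generic(), and for cbc(des) it deliberately initializes the hash with a NULL key so it gets plain md5 rather than hmac(md5).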
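fill_krb5_header() and verify_krb5_header() deduplicate the token-header handling that get_mic/verify_mic/wrap/unwrap used to open-code. A standalone sketch of the 16-byte RFC 4121 header they build; the token ids and flag bits below are the RFC 4121 values (in the patch they come from gss_krb5.h, which this diff does not show), and glibc's htobe16/htobe64 stand in for the kernel's cpu_to_be16/cpu_to_be64:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define KG_TOK_MIC_MSG          0x0404  /* RFC 4121 MIC token id */
#define KG_TOK_WRAP_MSG         0x0504  /* RFC 4121 Wrap token id */
#define FLAG_SENDER_IS_ACCEPTOR 0x01
#define FLAG_WRAP_CONFIDENTIAL  0x02

struct krb5_header {            /* 16 bytes on the wire */
        uint16_t kh_tok_id;     /* big-endian token id */
        uint8_t  kh_flags;
        uint8_t  kh_filler;     /* always 0xff */
        uint16_t kh_ec;         /* extra count; 0xffff filler for MIC */
        uint16_t kh_rrc;        /* rotation count; 0xffff for MIC */
        uint64_t kh_seq;        /* big-endian sequence number */
};

static void fill_header(struct krb5_header *kh, int privacy,
                        int initiate, uint64_t seq)
{
        kh->kh_tok_id = htobe16(privacy ? KG_TOK_WRAP_MSG
                                        : KG_TOK_MIC_MSG);
        kh->kh_flags = initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
        if (privacy) {
                kh->kh_flags |= FLAG_WRAP_CONFIDENTIAL;
                kh->kh_ec = htobe16(0);
                kh->kh_rrc = htobe16(0); /* cipher text not rotated */
        } else {
                kh->kh_ec = htobe16(0xffff);
                kh->kh_rrc = htobe16(0xffff);
        }
        kh->kh_filler = 0xff;
        kh->kh_seq = htobe64(seq);
}

int main(void)
{
        struct krb5_header kh;

        fill_header(&kh, 1, 1, 42);
        printf("tok_id=%04x flags=%02x size=%zu\n",
               be16toh(kh.kh_tok_id), kh.kh_flags, sizeof(kh));
        return 0;
}

MIC tokens carry 0xffff filler in kh_ec/kh_rrc while wrap tokens use zero and set FLAG_WRAP_CONFIDENTIAL; that split is exactly what verify_krb5_header() checks through its privacy argument.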
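The long comment above krb5_decrypt_bulk() deserves a concrete example. Cipher text arrives in blocksize-rounded chunks, so for client reads (adj_nob set) each page's plain bv_len has to be clipped so the running totals match both bd_nob_transferred (cipher) and bd_nob (plain); as the comment says, an odd size is only tolerated in the last page. A toy simulation of that clipping loop with made-up page sizes:

#include <stdio.h>

int main(void)
{
        unsigned int enc_len[3] = { 4096, 4096, 4096 }; /* rounded pages */
        unsigned int pt_len[3] = { 0, 0, 0 };
        unsigned int nob_transferred = 9216; /* cipher bytes received */
        unsigned int nob = 9000;             /* expected plain bytes */
        unsigned int ct_nob = 0, pt_nob = 0;
        int i;

        for (i = 0; i < 3 && ct_nob < nob_transferred; i++) {
                if (ct_nob + enc_len[i] > nob_transferred)
                        enc_len[i] = nob_transferred - ct_nob;
                pt_len[i] = enc_len[i];
                if (pt_nob + pt_len[i] > nob)
                        pt_len[i] = nob - pt_nob;
                ct_nob += enc_len[i];
                pt_nob += pt_len[i];
                /* last page ends up: cipher 1024, plain 808 */
                printf("page %d: cipher %u plain %u\n",
                       i, enc_len[i], pt_len[i]);
        }
        return 0;
}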
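gss_wrap_kerberos() pads the message to the encryption blocksize before checksumming and encrypting; the old static add_padding() shown above is replaced by gss_add_padding() from the gss_crypto helpers. The arithmetic relies on a power-of-two blocksize. A userspace copy of that logic, with a local rawobj struct mirroring Lustre's rawobj_t:

#include <stdio.h>
#include <string.h>

struct rawobj {
        unsigned int len;
        unsigned char *data;
};

static int add_padding(struct rawobj *msg, int msg_buflen, int blocksize)
{
        int padding = (blocksize - (msg->len & (blocksize - 1))) &
                      (blocksize - 1);

        if (!padding)
                return 0;
        if (msg->len + padding > msg_buflen)
                return -1;      /* caller's buffer too small */
        memset(msg->data + msg->len, padding, padding);
        msg->len += padding;
        return 0;
}

int main(void)
{
        unsigned char buf[32] = "hello";
        struct rawobj msg = { .len = 5, .data = buf };

        add_padding(&msg, sizeof(buf), 16);
        /* prints: padded len = 16, pad byte = 11 */
        printf("padded len = %u, pad byte = %u\n", msg.len, buf[5]);
        return 0;
}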
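The final wrap token asserted in gss_wrap_kerberos() is laid out as | krb5 header | cipher(confounder | padded msg | krb5 header) | checksum |, with the checksum truncated to ke_hash_size. A back-of-envelope length check using the aes128-cts row of the enctypes[] table (conf 16, hash 12, block 16):

#include <stdio.h>

int main(void)
{
        int khdr = 16;          /* sizeof(struct krb5_header) */
        int conf = 16;          /* ke_conf_size for aes128-cts */
        int hash = 12;          /* ke_hash_size, hmac-sha1-96 */
        int blocksize = 16;     /* cbc(aes) block */
        int msg = 100;
        int padded = (msg + blocksize - 1) / blocksize * blocksize;
        int token = khdr + (conf + padded + khdr) + hash;

        /* prints: msg 100 -> padded 112 -> token 172 bytes */
        printf("msg %d -> padded %d -> token %d bytes\n",
               msg, padded, token);
        return 0;
}

This matches the LASSERTs in the function: cipher.len must cover ke_conf_size + msg->len + sizeof(*khdr), and token->len must cover the header, the cipher text and the truncated hash.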
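gss_prep_bulk_kerberos() and krb5_encrypt_bulk() size the encrypted side of each bulk page by rounding bv_len up to the cipher blocksize with (len + blocksize - 1) & ~(blocksize - 1), again valid only for power-of-two blocksizes:

#include <stdio.h>

static unsigned int round_up_block(unsigned int len, unsigned int bsz)
{
        return (len + bsz - 1) & ~(bsz - 1);
}

int main(void)
{
        /* prints: 16 4096 4112 */
        printf("%u %u %u\n", round_up_block(1, 16),
               round_up_block(4096, 16), round_up_block(4097, 16));
        return 0;
}

One review aside: the offset sanity check in gss_prep_bulk_kerberos() tests bv_offset & blocksize, which inspects a single bit; the conventional alignment test would be bv_offset & (blocksize - 1). Worth confirming whether that is intentional.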