1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modifications for Lustre
5 * Copyright 2004 - 2006, Cluster File Systems, Inc.
7 * Author: Eric Mei <ericm@clusterfs.com>
11 * linux/net/sunrpc/gss_krb5_mech.c
12 * linux/net/sunrpc/gss_krb5_crypto.c
13 * linux/net/sunrpc/gss_krb5_seal.c
14 * linux/net/sunrpc/gss_krb5_seqnum.c
15 * linux/net/sunrpc/gss_krb5_unseal.c
17 * Copyright (c) 2001 The Regents of the University of Michigan.
18 * All rights reserved.
20 * Andy Adamson <andros@umich.edu>
21 * J. Bruce Fields <bfields@umich.edu>
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. Neither the name of the University nor the names of its
33 * contributors may be used to endorse or promote products derived
34 * from this software without specific prior written permission.
36 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
37 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
39 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
41 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
42 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
43 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
44 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
45 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
46 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 # define EXPORT_SYMTAB
53 #define DEBUG_SUBSYSTEM S_SEC
55 #include <linux/init.h>
56 #include <linux/module.h>
57 #include <linux/slab.h>
58 #include <linux/crypto.h>
59 #include <linux/random.h>
61 #include <liblustre.h>
65 #include <obd_class.h>
66 #include <obd_support.h>
67 #include <lustre/lustre_idl.h>
68 #include <lustre_net.h>
69 #include <lustre_import.h>
70 #include <lustre_sec.h>
73 #include "gss_internal.h"
/* Serializes updates of the per-context send sequence number
 * (kc_seq_send) in gss_get_mic_kerberos()/gss_wrap_kerberos().
 * NOTE(review): SPIN_LOCK_UNLOCKED is deprecated in later kernels;
 * DEFINE_SPINLOCK() is preferred there — confirm target kernel version. */
78 spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
/* NOTE(review): body fields of struct krb5_enctype; the struct header and
 * the ke_dispname field (used by enctype2str() below) fall outside this
 * view.  One entry describes one supported kerberos 5 encryption type. */
82 char *ke_enc_name; /* linux tfm name */
83 char *ke_hash_name; /* linux tfm name */
84 int ke_enc_mode; /* linux tfm mode */
85 int ke_hash_size; /* checksum size */
86 int ke_conf_size; /* confounder size */
87 unsigned int ke_hash_hmac:1; /* is hmac? */
91 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
92 * encryption, but currently we simply use CBC with padding, because linux
93 * doesn't support CTS yet. This needs to be fixed in the future.
/* Table of supported kerberos 5 enctypes, indexed by the on-wire enctype
 * number; entry bodies (tfm names, modes, sizes) are elided in this view. */
95 static struct krb5_enctype enctypes[] = {
96 [ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */
105 [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
114 [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
115 "aes128-cts-hmac-sha1-96",
123 [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
124 "aes256-cts-hmac-sha1-96",
132 [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */
/* Number of entries in enctypes[].
 * NOTE(review): expansion is unparenthesized; the conventional safer form
 * is (sizeof(enctypes) / sizeof(enctypes[0])). */
143 #define MAX_ENCTYPES sizeof(enctypes)/sizeof(struct krb5_enctype)
/* Map an enctype number to its display name from enctypes[]; the fallback
 * return for unknown enctypes is elided in this view. */
145 static const char * enctype2str(__u32 enctype)
147 if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
148 return enctypes[enctype].ke_dispname;
/*
 * Allocate the linux crypto tfm named alg_name (in mode alg_mode) for this
 * keyblock and program it with the raw key bytes already stored in
 * kb->kb_key.  Returns 0 on success, non-zero on allocation or setkey
 * failure (error/return paths elided in this view).
 */
154 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
156 kb->kb_tfm = crypto_alloc_tfm(alg_name, alg_mode);
157 if (kb->kb_tfm == NULL) {
158 CERROR("failed to alloc tfm: %s, mode %d\n",
163 if (crypto_cipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
164 CERROR("failed to set %s key, len %d\n",
165 alg_name, kb->kb_key.len);
/*
 * Validate kctx->kc_enctype against enctypes[] and set up the crypto tfms:
 * kc_keye (encryption) for everything except arcfour-hmac, and kc_keyi /
 * kc_keyc (integrity / checksum) only for non-hmac enctypes.  Returns 0 on
 * success (error/return paths elided in this view).
 */
173 int krb5_init_keys(struct krb5_ctx *kctx)
175 struct krb5_enctype *ke;
177 if (kctx->kc_enctype >= MAX_ENCTYPES ||
178 enctypes[kctx->kc_enctype].ke_hash_size == 0) {
179 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
183 ke = &enctypes[kctx->kc_enctype];
185 /* the arc4 tfm is stateful; users must allocate/use/free their own */
186 if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
187 keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
190 /* an hmac tfm is stateful; users must allocate/use/free their own */
191 if (ke->ke_hash_hmac == 0 &&
192 keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
194 if (ke->ke_hash_hmac == 0 &&
195 keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
/* Release a keyblock: free its raw key material and its crypto tfm. */
202 void keyblock_free(struct krb5_keyblock *kb)
204 rawobj_free(&kb->kb_key);
206 crypto_free_tfm(kb->kb_tfm);
/* Duplicate only the raw key bytes into 'new'; the tfm is NOT copied —
 * callers re-run krb5_init_keys() on the owning context afterwards
 * (see gss_copy_reverse_context_kerberos()). */
210 int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
212 return rawobj_dup(&new->kb_key, &kb->kb_key);
/* Consume 'len' bytes from the buffer cursor *ptr (bounded by 'end') into
 * res, presumably advancing *ptr on success (lines elided in this view).
 * The q < p comparison also rejects pointer wrap-around from a huge len.
 * Returns non-zero when the read would overrun the buffer. */
216 int get_bytes(char **ptr, const char *end, void *res, int len)
221 if (q > end || q < p)
/* Parse a length-prefixed blob from the buffer cursor into a freshly
 * allocated rawobj.  The bound check runs before OBD_ALLOC so a bogus
 * length cannot trigger a huge allocation.  Returns non-zero on overrun
 * or allocation failure (elided paths). */
229 int get_rawobj(char **ptr, const char *end, rawobj_t *res)
235 if (get_bytes(&p, end, &len, sizeof(len)))
239 if (q > end || q < p)
242 OBD_ALLOC(res->data, len);
247 memcpy(res->data, p, len);
/* Read 'keysize' raw key bytes from the buffer cursor into a newly
 * allocated buffer and hand ownership to kb->kb_key (freed later via
 * keyblock_free()).  On parse failure the buffer is released here. */
253 int get_keyblock(char **ptr, const char *end,
254 struct krb5_keyblock *kb, __u32 keysize)
258 OBD_ALLOC(buf, keysize);
262 if (get_bytes(ptr, end, buf, keysize)) {
263 OBD_FREE(buf, keysize);
267 kb->kb_key.len = keysize;
268 kb->kb_key.data = buf;
/* Free everything a krb5_ctx owns: the mech OID copy and all three
 * keyblocks (keys + tfms).  The kctx structure itself is freed by the
 * caller. */
273 void delete_context_kerberos(struct krb5_ctx *kctx)
275 rawobj_free(&kctx->kc_mech_used);
277 keyblock_free(&kctx->kc_keye);
278 keyblock_free(&kctx->kc_keyi);
279 keyblock_free(&kctx->kc_keyc);
/*
 * Parse a version-0/1 (rfc1964-style) context blob exported by the
 * user-level gssd: seed flag, seed, sign/seal algorithms (ignored),
 * endtime, send sequence number, mech OID, then enc and seq keys.
 * The old two-key layout is decomposed into the three-key new context:
 * the seq key is duplicated into both kc_keyc and kc_keyi.
 * Returns GSS_S_COMPLETE on success, GSS_S_FAILURE on a short or
 * inconsistent blob (success-path lines elided in this view).
 */
283 __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
285 unsigned int tmp_uint, keysize;
288 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
290 kctx->kc_seed_init = (tmp_uint != 0);
293 if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
296 /* sign/seal algorithm, not really used now */
297 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
298 get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
302 if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
306 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
308 kctx->kc_seq_send = tmp_uint;
311 if (get_rawobj(&p, end, &kctx->kc_mech_used))
314 /* old style enc/seq keys in format:
318 * we decompose them to fit into the new context
322 if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
325 if (get_bytes(&p, end, &keysize, sizeof(keysize)))
328 if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
332 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
333 tmp_uint != kctx->kc_enctype)
336 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
340 if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
343 /* old style fallback: reuse the seq key as the integrity key */
344 if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
350 CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
353 return GSS_S_FAILURE;
356 /* Flags for version 2 context flags (import_context_rfc4121 below) */
357 #define KRB5_CTX_FLAG_INITIATOR 0x00000001
358 #define KRB5_CTX_FLAG_CFX 0x00000002
359 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
/*
 * Parse a version-2 (rfc4121/CFX-style) context blob exported by the
 * user-level gssd: endtime, flags word, send sequence number, enctype,
 * per-key size, key count (must be 3), then the enc/integrity/checksum
 * keyblocks in order.  Returns GSS_S_COMPLETE on success, GSS_S_FAILURE
 * otherwise (success-path lines elided in this view).
 */
362 __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
364 unsigned int tmp_uint, keysize;
367 if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
371 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
374 if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
375 kctx->kc_initiate = 1;
376 if (tmp_uint & KRB5_CTX_FLAG_CFX)
378 if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
379 kctx->kc_have_acceptor_subkey = 1;
382 if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
386 if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
389 /* size of each key */
390 if (get_bytes(&p, end, &keysize, sizeof(keysize)))
393 /* number of keys - should always be 3 */
394 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
398 CERROR("Invalid number of keys: %u\n", tmp_uint);
403 if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
406 if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
409 if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
412 CDEBUG(D_SEC, "succesfully imported v2 context\n");
415 return GSS_S_FAILURE;
419 * The whole purpose here is to keep the user-level gss context parsing
420 * from nfs-utils as unchanged as we can; it is not quite mature yet,
421 * and many things are still unclear, e.g. heimdal support.
/*
 * Entry point for importing a context blob handed down from user space:
 * read the version word, allocate a krb5_ctx, dispatch to the rfc1964
 * (version 0/1) or rfc4121 parser, then build the crypto tfms via
 * krb5_init_keys().  On any failure the partially built context is torn
 * down and GSS_S_FAILURE returned; on success ownership of kctx passes
 * to gctx->internal_ctx_id.  (Allocation and version-check lines are
 * elided in this view.)
 */
424 __u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
425 struct gss_ctx *gctx)
427 struct krb5_ctx *kctx;
428 char *p = (char *) inbuf->data;
429 char *end = (char *) (inbuf->data + inbuf->len);
430 unsigned int tmp_uint, rc;
432 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
433 CERROR("Fail to read version\n");
434 return GSS_S_FAILURE;
437 /* only support 0, 1 for the moment */
439 CERROR("Invalid version %u\n", tmp_uint);
440 return GSS_S_FAILURE;
445 return GSS_S_FAILURE;
447 if (tmp_uint == 0 || tmp_uint == 1) {
448 kctx->kc_initiate = tmp_uint;
449 rc = import_context_rfc1964(kctx, p, end);
451 rc = import_context_rfc4121(kctx, p, end);
455 rc = krb5_init_keys(kctx);
458 delete_context_kerberos(kctx);
461 return GSS_S_FAILURE;
464 gctx->internal_ctx_id = kctx;
465 return GSS_S_COMPLETE;
/*
 * Build the reverse (server->client callback) context from an existing
 * one: flip the initiator role, swap send/recv sequence numbers, copy
 * the keys, and re-run krb5_init_keys() to rebuild the tfms.  On any
 * failure the new context is torn down.  (Allocation lines are elided
 * in this view.)
 */
469 __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
470 struct gss_ctx *gctx_new)
472 struct krb5_ctx *kctx = gctx->internal_ctx_id;
473 struct krb5_ctx *knew;
477 return GSS_S_FAILURE;
479 knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
480 knew->kc_cfx = kctx->kc_cfx;
481 knew->kc_seed_init = kctx->kc_seed_init;
482 knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
484 knew->kc_endtime = kctx->kc_endtime;
486 /* FIXME reverse context don't expire for now */
487 knew->kc_endtime = INT_MAX;
489 memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
490 knew->kc_seq_send = kctx->kc_seq_recv;
491 knew->kc_seq_recv = kctx->kc_seq_send;
492 knew->kc_enctype = kctx->kc_enctype;
494 if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
497 if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
499 if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
501 if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
503 if (krb5_init_keys(knew))
506 gctx_new->internal_ctx_id = knew;
507 CDEBUG(D_SEC, "succesfully copied reverse context\n");
508 return GSS_S_COMPLETE;
511 delete_context_kerberos(knew);
513 return GSS_S_FAILURE;
/* Report the context expiry time.  kc_endtime is first narrowed to
 * __u32 and then widened to unsigned long. */
517 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
518 unsigned long *endtime)
520 struct krb5_ctx *kctx = gctx->internal_ctx_id;
522 *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
523 return GSS_S_COMPLETE;
/* Destroy a kerberos security context: free its keys/tfms via
 * delete_context_kerberos() (the kctx free itself is elided here). */
527 void gss_delete_sec_context_kerberos(void *internal_ctx)
529 struct krb5_ctx *kctx = internal_ctx;
531 delete_context_kerberos(kctx);
/* Describe a linear kernel buffer as a single scatterlist entry for the
 * crypto API.  NOTE(review): assumes ptr..ptr+len does not cross a page
 * boundary (single sg entry) — callers pass small, contiguous buffers. */
536 void buf_to_sg(struct scatterlist *sg, char *ptr, int len)
538 sg->page = virt_to_page(ptr);
539 sg->offset = offset_in_page(ptr);
/*
 * Encrypt (or decrypt — selected by an elided 'decrypt' parameter) a
 * blocksize-aligned buffer in/out with an optional IV.  The input is
 * first copied to 'out' and then transformed in place through a single
 * scatterlist.  A zeroed 16-byte local IV is used when none is given.
 */
544 __u32 krb5_encrypt(struct crypto_tfm *tfm,
551 struct scatterlist sg;
552 __u8 local_iv[16] = {0};
557 if (length % crypto_tfm_alg_blocksize(tfm) != 0) {
558 CERROR("output length %d mismatch blocksize %d\n",
559 length, crypto_tfm_alg_blocksize(tfm));
563 if (crypto_tfm_alg_ivsize(tfm) > 16) {
564 CERROR("iv size too large %d\n", crypto_tfm_alg_ivsize(tfm));
569 memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
571 memcpy(out, in, length);
572 buf_to_sg(&sg, out, length);
575 ret = crypto_cipher_decrypt_iv(tfm, &sg, &sg, length, local_iv);
577 ret = crypto_cipher_encrypt_iv(tfm, &sg, &sg, length, local_iv);
/*
 * HMAC the message fragments (skipping empty ones) and, when khdr is
 * non-NULL, the krb5 token header, writing the digest to cksum->data.
 * Uses the legacy one-shot crypto_hmac_* kernel API keyed by 'key'.
 */
584 int krb5_digest_hmac(struct crypto_tfm *tfm,
586 struct krb5_header *khdr,
587 int msgcnt, rawobj_t *msgs,
590 struct scatterlist sg[1];
591 __u32 keylen = key->len, i;
593 crypto_hmac_init(tfm, key->data, &keylen);
595 for (i = 0; i < msgcnt; i++) {
596 if (msgs[i].len == 0)
598 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
599 crypto_hmac_update(tfm, sg, 1);
603 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
604 crypto_hmac_update(tfm, sg, 1);
607 crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
/*
 * Keyless digest of the message fragments (plus the krb5 header when
 * present), then encrypt the digest in place with the keyblock's cipher
 * tfm — the "derived checksum" scheme used by the non-hmac enctypes.
 * Returns the result of krb5_encrypt() on the digest.
 */
612 int krb5_digest_norm(struct crypto_tfm *tfm,
613 struct krb5_keyblock *kb,
614 struct krb5_header *khdr,
615 int msgcnt, rawobj_t *msgs,
618 struct scatterlist sg[1];
623 crypto_digest_init(tfm);
625 for (i = 0; i < msgcnt; i++) {
626 if (msgs[i].len == 0)
628 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
629 crypto_digest_update(tfm, sg, 1);
633 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
634 crypto_digest_update(tfm, sg, 1);
637 crypto_digest_final(tfm, cksum->data);
639 return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
640 cksum->data, cksum->len);
644 * Compute a (keyed or keyless) checksum over the plain text with the
645 * krb5 wire token header appended.  Allocates cksum->data (digest-sized);
 * the caller owns and frees it.  Dispatches to krb5_digest_hmac() for
 * hmac enctypes, else krb5_digest_norm().  Returns GSS_S_COMPLETE or
 * GSS_S_FAILURE; always frees the hash tfm before returning.
648 __s32 krb5_make_checksum(__u32 enctype,
649 struct krb5_keyblock *kb,
650 struct krb5_header *khdr,
651 int msgcnt, rawobj_t *msgs,
654 struct krb5_enctype *ke = &enctypes[enctype];
655 struct crypto_tfm *tfm;
656 __u32 code = GSS_S_FAILURE;
659 if (!(tfm = crypto_alloc_tfm(ke->ke_hash_name, 0))) {
660 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
661 return GSS_S_FAILURE;
664 cksum->len = crypto_tfm_alg_digestsize(tfm);
665 OBD_ALLOC(cksum->data, cksum->len);
671 if (ke->ke_hash_hmac)
672 rc = krb5_digest_hmac(tfm, &kb->kb_key,
673 khdr, msgcnt, msgs, cksum);
675 rc = krb5_digest_norm(tfm, kb,
676 khdr, msgcnt, msgs, cksum);
679 code = GSS_S_COMPLETE;
681 crypto_free_tfm(tfm);
/*
 * Produce a MIC token: fill a krb5 header (tok_id KG_TOK_MIC_MSG, filler
 * 0xff, EC/RRC 0xffff per RFC 4121, next send sequence number under
 * krb5_seq_lock), checksum the messages + header with the kc_keyc key,
 * and append the trailing ke_hash_size bytes of the digest after the
 * header.  token->len is set to the final token size.
 */
686 __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
691 struct krb5_ctx *kctx = gctx->internal_ctx_id;
692 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
693 struct krb5_header *khdr;
694 unsigned char acceptor_flag;
695 rawobj_t cksum = RAWOBJ_EMPTY;
696 __u32 rc = GSS_S_FAILURE;
698 acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
700 /* fill krb5 header */
701 LASSERT(token->len >= sizeof(*khdr));
702 khdr = (struct krb5_header *) token->data;
704 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
705 khdr->kh_flags = acceptor_flag;
706 khdr->kh_filler = 0xff;
707 khdr->kh_ec = cpu_to_be16(0xffff);
708 khdr->kh_rrc = cpu_to_be16(0xffff);
709 spin_lock(&krb5_seq_lock);
710 khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
711 spin_unlock(&krb5_seq_lock);
714 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
715 khdr, msgcnt, msgs, &cksum))
718 LASSERT(cksum.len >= ke->ke_hash_size);
719 LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
/* only the last ke_hash_size bytes of the digest go on the wire */
720 memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
723 token->len = sizeof(*khdr) + ke->ke_hash_size;
/*
 * Verify a MIC token produced by the peer: sanity-check the krb5 header
 * fields (token id, direction flag, filler, EC/RRC), recompute the
 * checksum over the messages with kc_keyc, and compare against the
 * checksum carried after the header.  Returns GSS_S_DEFECTIVE_TOKEN /
 * GSS_S_BAD_SIG / GSS_S_FAILURE on the respective errors (success path
 * elided in this view).
 */
731 __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
736 struct krb5_ctx *kctx = gctx->internal_ctx_id;
737 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
738 struct krb5_header *khdr;
739 unsigned char acceptor_flag;
740 rawobj_t cksum = RAWOBJ_EMPTY;
741 __u32 rc = GSS_S_FAILURE;
/* when we initiated, the peer's tokens must carry the acceptor flag */
743 acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
745 if (token->len < sizeof(*khdr)) {
746 CERROR("short signature: %u\n", token->len);
747 return GSS_S_DEFECTIVE_TOKEN;
750 khdr = (struct krb5_header *) token->data;
753 if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_MIC_MSG) {
754 CERROR("bad token id\n");
755 return GSS_S_DEFECTIVE_TOKEN;
757 if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
758 CERROR("bad direction flag\n");
759 return GSS_S_BAD_SIG;
761 if (khdr->kh_filler != 0xff) {
762 CERROR("bad filler\n");
763 return GSS_S_DEFECTIVE_TOKEN;
765 if (be16_to_cpu(khdr->kh_ec) != 0xffff ||
766 be16_to_cpu(khdr->kh_rrc) != 0xffff) {
767 CERROR("bad EC or RRC\n");
768 return GSS_S_DEFECTIVE_TOKEN;
771 if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
772 CERROR("short signature: %u, require %d\n",
773 token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
777 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
778 khdr, msgcnt, msgs, &cksum))
779 return GSS_S_FAILURE;
781 LASSERT(cksum.len >= ke->ke_hash_size);
/* NOTE(review): plain memcmp is not constant-time; timing side channel
 * is unlikely to matter here but worth confirming. */
782 if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
784 CERROR("checksum mismatch\n");
/*
 * Pad msg in place up to a multiple of blocksize (blocksize must be a
 * power of two for the mask arithmetic).  Each pad byte holds the pad
 * length, PKCS#7-style.  Fails if the buffer cannot hold the padding.
 */
796 int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
800 padding = (blocksize - (msg->len & (blocksize - 1))) &
805 if (msg->len + padding > msg_buflen) {
806 CERROR("bufsize %u too small: datalen %u, padding %u\n",
807 msg_buflen, msg->len, padding);
811 memset(msg->data + msg->len, padding, padding);
/*
 * Encrypt or decrypt a list of rawobjs into the contiguous output buffer
 * outobj, packing the results back to back.  Elided parameters select
 * ECB-style (no IV, used for arc4) versus IV-chained operation and the
 * direction; the 16-byte local IV chains across fragments in the IV case.
 * On success outobj->len is shrunk to the bytes actually produced.
 */
817 int krb5_encrypt_rawobjs(struct crypto_tfm *tfm,
824 struct scatterlist src, dst;
825 __u8 local_iv[16] = {0}, *buf;
832 for (i = 0; i < inobj_cnt; i++) {
833 LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
835 buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
836 buf_to_sg(&dst, buf, outobj->len - datalen);
840 rc = crypto_cipher_encrypt(
841 tfm, &dst, &src, src.length);
843 rc = crypto_cipher_decrypt(
844 tfm, &dst, &src, src.length);
847 rc = crypto_cipher_encrypt_iv(
848 tfm, &dst, &src, src.length, local_iv);
850 rc = crypto_cipher_decrypt_iv(
851 tfm, &dst, &src, src.length, local_iv);
855 CERROR("encrypt error %d\n", rc);
859 datalen += inobjs[i].len;
860 buf += inobjs[i].len;
863 outobj->len = datalen;
/*
 * Wrap (encrypt + integrity-protect) a message into a krb5 token:
 *   1. fill the krb5 header (KG_TOK_WRAP_MSG, confidential flag, next
 *      send sequence number under krb5_seq_lock);
 *   2. pad msg to the cipher blocksize and checksum
 *      confounder | msg | header with kc_keyi;
 *   3. encrypt the same three pieces in place after the header —
 *      for arcfour-hmac the RC4 key is first derived from the checksum
 *      (RFC 4757 style) into a one-shot ECB arc4 tfm;
 *   4. append the trailing ke_hash_size checksum bytes and set the
 *      final token->len.
 * Returns GSS_S_COMPLETE or GSS_S_FAILURE (some cleanup/else lines are
 * elided in this view).
 */
868 __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
873 struct krb5_ctx *kctx = gctx->internal_ctx_id;
874 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
875 struct krb5_header *khdr;
876 unsigned char acceptor_flag;
878 rawobj_t cksum = RAWOBJ_EMPTY;
879 rawobj_t data_desc[3], cipher;
880 __u8 conf[GSS_MAX_CIPHER_BLOCK];
884 LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
885 LASSERT(kctx->kc_keye.kb_tfm == NULL ||
887 crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm));
889 acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
891 /* fill krb5 header */
892 LASSERT(token->len >= sizeof(*khdr));
893 khdr = (struct krb5_header *) token->data;
895 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
896 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
897 khdr->kh_filler = 0xff;
898 khdr->kh_ec = cpu_to_be16(0);
899 khdr->kh_rrc = cpu_to_be16(0);
900 spin_lock(&krb5_seq_lock);
901 khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
902 spin_unlock(&krb5_seq_lock);
904 /* generate confounder */
905 get_random_bytes(conf, ke->ke_conf_size);
907 /* get encryption blocksize. note kc_keye might not be associated with
908 * a tfm, currently only for arcfour-hmac
910 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
911 LASSERT(kctx->kc_keye.kb_tfm == NULL);
914 LASSERT(kctx->kc_keye.kb_tfm);
915 blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
917 LASSERT(blocksize <= ke->ke_conf_size);
919 /* pad the message to the cipher blocksize */
920 if (add_padding(msg, msg_buflen, blocksize))
921 return GSS_S_FAILURE;
924 * clear text layout, same for both checksum & encryption:
925 * -----------------------------------------
926 * | confounder | clear msgs | krb5 header |
927 * -----------------------------------------
929 data_desc[0].data = conf;
930 data_desc[0].len = ke->ke_conf_size;
931 data_desc[1].data = msg->data;
932 data_desc[1].len = msg->len;
933 data_desc[2].data = (__u8 *) khdr;
934 data_desc[2].len = sizeof(*khdr);
936 /* compute checksum */
937 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
938 khdr, 3, data_desc, &cksum))
939 return GSS_S_FAILURE;
940 LASSERT(cksum.len >= ke->ke_hash_size);
942 /* encrypting, cipher text will be directly inplace */
943 cipher.data = (__u8 *) (khdr + 1);
944 cipher.len = token->len - sizeof(*khdr);
945 LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
947 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
949 struct crypto_tfm *arc4_tfm;
/* derive the per-message rc4 key from the checksum with kc_keyi */
951 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
952 NULL, 1, &cksum, &arc4_keye)) {
953 CERROR("failed to obtain arc4 enc key\n");
954 GOTO(arc4_out, enc_rc = -EACCES);
957 arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
958 if (arc4_tfm == NULL) {
959 CERROR("failed to alloc tfm arc4 in ECB mode\n");
960 GOTO(arc4_out_key, enc_rc = -EACCES);
963 if (crypto_cipher_setkey(arc4_tfm,
964 arc4_keye.data, arc4_keye.len)) {
965 CERROR("failed to set arc4 key, len %d\n",
967 GOTO(arc4_out_tfm, enc_rc = -EACCES);
970 enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
971 3, data_desc, &cipher, 1);
973 crypto_free_tfm(arc4_tfm);
975 rawobj_free(&arc4_keye);
977 do {} while(0); /* just to avoid compile warning */
979 enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
980 3, data_desc, &cipher, 1);
985 return GSS_S_FAILURE;
988 /* fill in checksum */
989 LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
990 memcpy((char *)(khdr + 1) + cipher.len,
991 cksum.data + cksum.len - ke->ke_hash_size,
995 /* final token length */
996 token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
997 return GSS_S_COMPLETE;
/*
 * Unwrap a krb5 token produced by gss_wrap_kerberos() on the peer:
 *   1. sanity-check the header (token id, direction, confidential flag,
 *      filler, EC/RRC must be zero for wrap tokens);
 *   2. compute bodysize = token minus header and trailing checksum, and
 *      validate its alignment and bounds against the output buffer;
 *   3. decrypt the body into a temporary buffer (arc4 derived-key path
 *      mirrors gss_wrap_kerberos);
 *   4. verify the decrypted trailing krb5 header matches the plaintext
 *      one, recompute and compare the checksum with kc_keyi;
 *   5. copy the clear message (minus confounder and trailing header)
 *      into msg.
 * Returns GSS_S_COMPLETE / GSS_S_DEFECTIVE_TOKEN / GSS_S_BAD_SIG /
 * GSS_S_FAILURE (some lines elided in this view).
 */
1001 __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
1005 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1006 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1007 struct krb5_header *khdr;
1008 unsigned char acceptor_flag;
1009 unsigned char *tmpbuf;
1010 int blocksize, bodysize;
1011 rawobj_t cksum = RAWOBJ_EMPTY;
1012 rawobj_t cipher_in, plain_out;
1013 __u32 rc = GSS_S_FAILURE, enc_rc = 0;
1017 acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
1019 if (token->len < sizeof(*khdr)) {
1020 CERROR("short signature: %u\n", token->len);
1021 return GSS_S_DEFECTIVE_TOKEN;
1024 khdr = (struct krb5_header *) token->data;
1026 /* sanity check header */
1027 if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_WRAP_MSG) {
1028 CERROR("bad token id\n");
1029 return GSS_S_DEFECTIVE_TOKEN;
1031 if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
1032 CERROR("bad direction flag\n");
1033 return GSS_S_BAD_SIG;
1035 if ((khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
1036 CERROR("missing confidential flag\n");
1037 return GSS_S_BAD_SIG;
1039 if (khdr->kh_filler != 0xff) {
1040 CERROR("bad filler\n");
1041 return GSS_S_DEFECTIVE_TOKEN;
1043 if (be16_to_cpu(khdr->kh_ec) != 0x0 ||
1044 be16_to_cpu(khdr->kh_rrc) != 0x0) {
1045 CERROR("bad EC or RRC\n");
1046 return GSS_S_DEFECTIVE_TOKEN;
1050 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1051 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1054 LASSERT(kctx->kc_keye.kb_tfm);
1055 blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
1058 /* expected token layout:
1059 * ----------------------------------------
1060 * | krb5 header | cipher text | checksum |
1061 * ----------------------------------------
1063 bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1065 if (bodysize % blocksize) {
1066 CERROR("odd bodysize %d\n", bodysize);
1067 return GSS_S_DEFECTIVE_TOKEN;
1070 if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1071 CERROR("incomplete token: bodysize %d\n", bodysize);
1072 return GSS_S_DEFECTIVE_TOKEN;
/* NOTE(review): the message below prints bodysize - ke_conf_size but the
 * check subtracts sizeof(*khdr) too — the logged "require" value is
 * larger than what is actually required. */
1075 if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1076 CERROR("buffer too small: %u, require %d\n",
1077 msg->len, bodysize - ke->ke_conf_size);
1078 return GSS_S_FAILURE;
1082 OBD_ALLOC(tmpbuf, bodysize);
1084 return GSS_S_FAILURE;
1086 cipher_in.data = (__u8 *) (khdr + 1);
1087 cipher_in.len = bodysize;
1088 plain_out.data = tmpbuf;
1089 plain_out.len = bodysize;
1091 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1093 struct crypto_tfm *arc4_tfm;
/* for arc4 the rc4 key derives from the checksum carried in the token */
1095 cksum.data = token->data + token->len - ke->ke_hash_size;
1096 cksum.len = ke->ke_hash_size;
1098 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1099 NULL, 1, &cksum, &arc4_keye)) {
1100 CERROR("failed to obtain arc4 enc key\n");
1101 GOTO(arc4_out, enc_rc = -EACCES);
1104 arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
1105 if (arc4_tfm == NULL) {
1106 CERROR("failed to alloc tfm arc4 in ECB mode\n");
1107 GOTO(arc4_out_key, enc_rc = -EACCES);
1110 if (crypto_cipher_setkey(arc4_tfm,
1111 arc4_keye.data, arc4_keye.len)) {
1112 CERROR("failed to set arc4 key, len %d\n",
1114 GOTO(arc4_out_tfm, enc_rc = -EACCES);
1117 enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1118 1, &cipher_in, &plain_out, 0);
1120 crypto_free_tfm(arc4_tfm);
1122 rawobj_free(&arc4_keye);
/* reset: cksum pointed into the token, it must not be freed below */
1124 cksum = RAWOBJ_EMPTY;
1126 enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1127 1, &cipher_in, &plain_out, 0);
1131 CERROR("error decrypt\n");
1134 LASSERT(plain_out.len == bodysize);
1136 /* expected clear text layout:
1137 * -----------------------------------------
1138 * | confounder | clear msgs | krb5 header |
1139 * -----------------------------------------
1142 /* last part must be identical to the krb5 header */
1143 if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1145 CERROR("decrypted header mismatch\n");
1149 /* verify checksum */
1150 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1151 khdr, 1, &plain_out, &cksum))
1154 LASSERT(cksum.len >= ke->ke_hash_size);
1155 if (memcmp((char *)(khdr + 1) + bodysize,
1156 cksum.data + cksum.len - ke->ke_hash_size,
1157 ke->ke_hash_size)) {
1158 CERROR("cksum mismatch\n");
1162 msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
1163 memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1165 rc = GSS_S_COMPLETE;
1167 OBD_FREE(tmpbuf, bodysize);
1168 rawobj_free(&cksum);
/* Raw bulk encryption helper: encrypt length bytes from in_buf to
 * out_buf with the context's kc_keye cipher, NULL IV.  Logs on error;
 * the return mapping is elided in this view. */
1173 __u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx,
1178 struct krb5_ctx *kctx = ctx->internal_ctx_id;
1181 rc = krb5_encrypt(kctx->kc_keye.kb_tfm, 0,
1182 NULL, in_buf, out_buf, length);
1184 CERROR("plain encrypt error: %d\n", rc);
/* Render a short human-readable description of the context (its enctype
 * display name) into buf; returns the snprintf result (format string
 * elided in this view). */
1189 int gss_display_kerberos(struct gss_ctx *ctx,
1193 struct krb5_ctx *kctx = ctx->internal_ctx_id;
1196 written = snprintf(buf, bufsize,
1199 enctype2str(kctx->kc_enctype));
/* Operations vector binding the kerberos implementations above into the
 * generic lgss mechanism layer. */
1203 static struct gss_api_ops gss_kerberos_ops = {
1204 .gss_import_sec_context = gss_import_sec_context_kerberos,
1205 .gss_copy_reverse_context = gss_copy_reverse_context_kerberos,
1206 .gss_inquire_context = gss_inquire_context_kerberos,
1207 .gss_get_mic = gss_get_mic_kerberos,
1208 .gss_verify_mic = gss_verify_mic_kerberos,
1209 .gss_wrap = gss_wrap_kerberos,
1210 .gss_unwrap = gss_unwrap_kerberos,
1211 .gss_plain_encrypt = gss_plain_encrypt_kerberos,
1212 .gss_delete_sec_context = gss_delete_sec_context_kerberos,
1213 .gss_display = gss_display_kerberos,
/* Subflavors exposed by this mechanism: krb5 (none), krb5i (integrity),
 * krb5p (privacy), mapped to the sptlrpc service levels. */
1216 static struct subflavor_desc gss_kerberos_sfs[] = {
1218 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5,
1220 .sf_service = SPTLRPC_SVC_NONE,
1224 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
1226 .sf_service = SPTLRPC_SVC_AUTH,
1230 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5P,
1232 .sf_service = SPTLRPC_SVC_PRIV,
1238 * currently we leave module owner NULL
/* Mechanism descriptor registered with the lgss layer; gm_oid is the
 * standard krb5 GSS mechanism OID 1.2.840.113554.1.2.2 in DER. */
1240 static struct gss_api_mech gss_kerberos_mech = {
1241 .gm_owner = NULL, /*THIS_MODULE, */
1243 .gm_oid = (rawobj_t)
1244 {9, "\052\206\110\206\367\022\001\002\002"},
1245 .gm_ops = &gss_kerberos_ops,
1247 .gm_sfs = gss_kerberos_sfs,
/* Module init: register the kerberos mechanism with the lgss layer;
 * logs on failure (return handling elided in this view). */
1250 int __init init_kerberos_module(void)
1254 status = lgss_mech_register(&gss_kerberos_mech);
1256 CERROR("Failed to register kerberos gss mechanism!\n");
/* Module exit: unregister the kerberos mechanism. */
1260 void __exit cleanup_kerberos_module(void)
1262 lgss_mech_unregister(&gss_kerberos_mech);