/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Modifications for Lustre
 * Copyright 2004 - 2006, Cluster File Systems, Inc.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 *
 * linux/net/sunrpc/gss_krb5_mech.c
 * linux/net/sunrpc/gss_krb5_crypto.c
 * linux/net/sunrpc/gss_krb5_seal.c
 * linux/net/sunrpc/gss_krb5_seqnum.c
 * linux/net/sunrpc/gss_krb5_unseal.c
 *
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * J. Bruce Fields <bfields@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/random.h>
#include <linux/mutex.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"

spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};

/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but for now we simply do CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                "des-cbc-md5",
                "des",
                "md5",
                CRYPTO_TFM_MODE_CBC,
                16,
                8,
                0,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                "des3-hmac-sha1",
                "des3_ede",
                "sha1",
                CRYPTO_TFM_MODE_CBC,
                20,
                8,
                1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96",
                "aes",
                "sha1",
                CRYPTO_TFM_MODE_CBC,
                12,
                16,
                1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96",
                "aes",
                "sha1",
                CRYPTO_TFM_MODE_CBC,
                12,
                16,
                1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                "arcfour-hmac-md5",
                "arc4",
                "md5",
                CRYPTO_TFM_MODE_ECB,
                16,
                8,
                1,
        },
};

#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(struct krb5_enctype))

static const char *enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}

static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
        kb->kb_tfm = crypto_alloc_tfm(alg_name, alg_mode);
        if (kb->kb_tfm == NULL) {
                CERROR("failed to alloc tfm: %s, mode %d\n",
                       alg_name, alg_mode);
                return -1;
        }

        if (crypto_cipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                CERROR("failed to set %s key, len %d\n",
                       alg_name, kb->kb_key.len);
                return -1;
        }

        return 0;
}

static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* the arc4 tfm is stateful; users must alloc/use/free it themselves */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* hmac tfms are stateful; users must alloc/use/free them themselves */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}

static
void keyblock_free(struct krb5_keyblock *kb)
{
        rawobj_free(&kb->kb_key);
        if (kb->kb_tfm)
                crypto_free_tfm(kb->kb_tfm);
}

static
int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
{
        return rawobj_dup(&new->kb_key, &kb->kb_key);
}

static
int get_bytes(char **ptr, const char *end, void *res, int len)
{
        char *p, *q;

        p = *ptr;
        q = p + len;
        if (q > end || q < p)
                return -1;

        memcpy(res, p, len);
        *ptr = q;
        return 0;
}

static
int get_rawobj(char **ptr, const char *end, rawobj_t *res)
{
        char  *p, *q;
        __u32  len;

        p = *ptr;
        if (get_bytes(&p, end, &len, sizeof(len)))
                return -1;

        q = p + len;
        if (q > end || q < p)
                return -1;

        OBD_ALLOC(res->data, len);
        if (!res->data)
                return -1;

        res->len = len;
        memcpy(res->data, p, len);
        *ptr = q;
        return 0;
}

static
int get_keyblock(char **ptr, const char *end,
                 struct krb5_keyblock *kb, __u32 keysize)
{
        char *buf;

        OBD_ALLOC(buf, keysize);
        if (buf == NULL)
                return -1;

        if (get_bytes(ptr, end, buf, keysize)) {
                OBD_FREE(buf, keysize);
                return -1;
        }

        kb->kb_key.len = keysize;
        kb->kb_key.data = buf;
        return 0;
}
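
/*
 * Taken together, the three helpers above define the on-wire encoding of
 * the context blob handed down from userspace: plain integers are copied
 * byte-for-byte, a rawobj is a 32-bit length followed by that many bytes,
 * and a keyblock is raw key material whose size was read separately.
 */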

static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        keyblock_free(&kctx->kc_keye);
        keyblock_free(&kctx->kc_keyi);
        keyblock_free(&kctx->kc_keyc);
}

static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
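
/*
 * Summarizing the reads above, the rfc1964-style blob (after the version
 * word consumed by the caller) is laid out as:
 *
 *   u32 seed_init | seed | u32 signalg | u32 sealalg | u32 endtime |
 *   u32 seq_send | rawobj mech_used | u32 enctype | u32 keysize | enc key |
 *   u32 enctype | u32 keysize | seq key
 */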

/* Flags for a version 2 context */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
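
/*
 * Likewise, the version 2 (rfc4121-style) blob parsed above is:
 *
 *   endtime | u32 flags | seq_send | u32 enctype | u32 keysize |
 *   u32 keycount (must be 3) | Ke | Ki | Kc
 */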

/*
 * The whole purpose here is to keep the parsing of user-level gss context
 * blobs as close to nfs-utils as we can: that code is not quite mature yet,
 * and many details (heimdal interoperability, etc.) are still unclear.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char            *p = (char *) inbuf->data;
        char            *end = (char *) (inbuf->data + inbuf->len);
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* only support version 0, 1 (rfc1964) and 2 (rfc4121) for now */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);
                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
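
#if 0
/* Illustrative only: a hypothetical caller (not part of the original file)
 * that imports a serialized context blob `buf'/`buflen' received from
 * userspace. */
static __u32 example_import(struct gss_ctx *gctx, void *buf, __u32 buflen)
{
        rawobj_t inbuf = { .len = buflen, .data = buf };

        return gss_import_sec_context_kerberos(&inbuf, gctx);
}
#endif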

static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (knew == NULL)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        /* the reverse context swaps the send/receive sequence numbers */
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}

static
void buf_to_sg(struct scatterlist *sg, char *ptr, int len)
{
        /* build a single-entry scatterlist over a contiguous buffer */
        sg->page = virt_to_page(ptr);
        sg->offset = offset_in_page(ptr);
        sg->length = len;
}

static
__u32 krb5_encrypt(struct crypto_tfm *tfm,
                   int decrypt,
                   void *iv,
                   void *in,
                   void *out,
                   int length)
{
        struct scatterlist sg;
        __u8 local_iv[16] = {0};
        __u32 ret = -EINVAL;

        if (length % crypto_tfm_alg_blocksize(tfm) != 0) {
                CERROR("output length %d mismatch blocksize %d\n",
                       length, crypto_tfm_alg_blocksize(tfm));
                goto out;
        }

        if (crypto_tfm_alg_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n", crypto_tfm_alg_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));

        memcpy(out, in, length);
        buf_to_sg(&sg, out, length);

        if (decrypt)
                ret = crypto_cipher_decrypt_iv(tfm, &sg, &sg, length, local_iv);
        else
                ret = crypto_cipher_encrypt_iv(tfm, &sg, &sg, length, local_iv);

out:
        return ret;
}
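
#if 0
/* Illustrative only: a hypothetical helper (not part of the original file)
 * showing in-place encryption of a single cipher block with a zero IV,
 * using a key previously set up by keyblock_init(). */
static void example_encrypt_block(struct krb5_ctx *kctx)
{
        __u8 blk[16] = "0123456789abcde";       /* one AES-sized block */

        if (krb5_encrypt(kctx->kc_keye.kb_tfm, 0 /* encrypt */,
                         NULL /* zero IV */, blk, blk, sizeof(blk)))
                CERROR("example encrypt failed\n");
}
#endif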

static
int krb5_digest_hmac(struct crypto_tfm *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     rawobj_t *cksum)
{
        struct scatterlist sg[1];
        __u32 keylen = key->len, i;

        crypto_hmac_init(tfm, key->data, &keylen);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hmac_update(tfm, sg, 1);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_hmac_update(tfm, sg, 1);
        }

        crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
        return 0;
}

static
int krb5_digest_norm(struct crypto_tfm *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     rawobj_t *cksum)
{
        struct scatterlist sg[1];
        int                i;

        crypto_digest_init(tfm);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_digest_update(tfm, sg, 1);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_digest_update(tfm, sg, 1);
        }

        crypto_digest_final(tfm, cksum->data);

        /* a "keyed" checksum: encrypt the plain digest with the key's tfm */
        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
}

/*
 * compute a keyed or keyless checksum over the plain text, with the krb5
 * wire token header appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct krb5_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         rawobj_t *cksum)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct crypto_tfm   *tfm;
        __u32                code = GSS_S_FAILURE;
        int                  rc;

        if (!(tfm = crypto_alloc_tfm(ke->ke_hash_name, 0))) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = crypto_tfm_alg_digestsize(tfm);
        OBD_ALLOC(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        if (ke->ke_hash_hmac)
                rc = krb5_digest_hmac(tfm, &kb->kb_key,
                                      khdr, msgcnt, msgs, cksum);
        else
                rc = krb5_digest_norm(tfm, kb,
                                      khdr, msgcnt, msgs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        crypto_free_tfm(tfm);
        return code;
}
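
#if 0
/* Illustrative only: a hypothetical helper (not part of the original file)
 * computing a checksum over a single message with the checksum subkey
 * kc_keyc, similar to the MIC routines below but without a token header;
 * `cksum' is allocated by krb5_make_checksum() and must be freed with
 * rawobj_free(). */
static __s32 example_checksum(struct krb5_ctx *kctx, rawobj_t *msg,
                              rawobj_t *cksum)
{
        return krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                                  NULL /* no token header */, 1, msg, cksum);
}
#endif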

static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char        acceptor_flag;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                rc = GSS_S_FAILURE;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;

        khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
        khdr->kh_flags = acceptor_flag;
        khdr->kh_filler = 0xff;
        khdr->kh_ec = cpu_to_be16(0xffff);
        khdr->kh_rrc = cpu_to_be16(0xffff);
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, &cksum))
                goto out_err;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rc = GSS_S_COMPLETE;
out_err:
        rawobj_free(&cksum);
        return rc;
}
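
/*
 * The finished MIC token built above is simply:
 * -----------------------------
 * | krb5 header | checksum    |
 * -----------------------------
 * where the checksum is the trailing ke_hash_size bytes of the digest.
 */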

static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char        acceptor_flag;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                rc = GSS_S_FAILURE;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_MIC_MSG) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != 0xffff ||
            be16_to_cpu(khdr->kh_rrc) != 0xffff) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rc = GSS_S_BAD_SIG;
                goto out;
        }

        rc = GSS_S_COMPLETE;
out:
        rawobj_free(&cksum);
        return rc;
}

static
int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
{
        int padding;

        padding = (blocksize - (msg->len & (blocksize - 1))) &
                  (blocksize - 1);
        if (!padding)
                return 0;

        if (msg->len + padding > msg_buflen) {
                CERROR("bufsize %u too small: datalen %u, padding %u\n",
                       msg_buflen, msg->len, padding);
                return -EINVAL;
        }

        memset(msg->data + msg->len, padding, padding);
        msg->len += padding;
        return 0;
}
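
/*
 * Example: with blocksize 8 and msg->len 13, padding is
 * (8 - (13 & 7)) & 7 = 3, so three bytes of value 0x03 are appended and
 * msg->len grows to 16; an already aligned message is left untouched.
 */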

static
int krb5_encrypt_rawobjs(struct crypto_tfm *tfm,
                         int mode_ecb,
                         int inobj_cnt,
                         rawobj_t *inobjs,
                         rawobj_t *outobj,
                         int enc)
{
        struct scatterlist src, dst;
        __u8               local_iv[16] = {0}, *buf;
        __u32              datalen = 0;
        int                i, rc;

        buf = outobj->data;

        for (i = 0; i < inobj_cnt; i++) {
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
                buf_to_sg(&dst, buf, outobj->len - datalen);

                if (mode_ecb) {
                        if (enc)
                                rc = crypto_cipher_encrypt(
                                        tfm, &dst, &src, src.length);
                        else
                                rc = crypto_cipher_decrypt(
                                        tfm, &dst, &src, src.length);
                } else {
                        if (enc)
                                rc = crypto_cipher_encrypt_iv(
                                        tfm, &dst, &src, src.length, local_iv);
                        else
                                rc = crypto_cipher_decrypt_iv(
                                        tfm, &dst, &src, src.length, local_iv);
                }

                if (rc) {
                        CERROR("encrypt error %d\n", rc);
                        return rc;
                }

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        outobj->len = datalen;
        return 0;
}

static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char        acceptor_flag;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  enc_rc = 0;

        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm));

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;

        khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
        khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
        khdr->kh_filler = 0xff;
        khdr->kh_ec = cpu_to_be16(0);
        khdr->kh_rrc = cpu_to_be16(0);
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);

        /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout, same for both checksum & encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* encrypting, cipher text will be directly inplace */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t           arc4_keye;
                struct crypto_tfm *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, enc_rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
                if (arc4_tfm == NULL) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, enc_rc = -EACCES);
                }

                if (crypto_cipher_setkey(arc4_tfm,
                                         arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, enc_rc = -EACCES);
                }

                enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                              3, data_desc, &cipher, 1);
arc4_out_tfm:
                crypto_free_tfm(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid compile warning */
        } else {
                enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                              3, data_desc, &cipher, 1);
        }

        if (enc_rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
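
/*
 * The finished wrap token produced above is laid out as:
 * ----------------------------------------
 * | krb5 header | cipher text | checksum |
 * ----------------------------------------
 * which is exactly what gss_unwrap_kerberos() below expects.
 */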

static
__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
                          rawobj_t *token,
                          rawobj_t *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char        acceptor_flag;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        __u32                rc = GSS_S_FAILURE, enc_rc = 0;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        /* sanity check header */
        if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_WRAP_MSG) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if ((khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != 0x0 ||
            be16_to_cpu(khdr->kh_rrc) != 0x0) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* get encryption blocksize; arcfour-hmac is a stream cipher */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n",
                       msg->len, (int) (bodysize - ke->ke_conf_size -
                       sizeof(*khdr)));
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        cipher_in.data = (__u8 *) (khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t           arc4_keye;
                struct crypto_tfm *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, enc_rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
                if (arc4_tfm == NULL) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, enc_rc = -EACCES);
                }

                if (crypto_cipher_setkey(arc4_tfm,
                                         arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, enc_rc = -EACCES);
                }

                enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                              1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
                crypto_free_tfm(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
        } else {
                enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                              1, &cipher_in, &plain_out, 0);
        }

        if (enc_rc != 0) {
                CERROR("decryption failed: %d\n", enc_rc);
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* last part must be identical to the krb5 header */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted header mismatch\n");
                goto out_free;
        }

        /* verify checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, &plain_out, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("cksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        rc = GSS_S_COMPLETE;
out_free:
        OBD_FREE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return rc;
}

static
__u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx,
                                 int decrypt,
                                 int length,
                                 void *in_buf,
                                 void *out_buf)
{
        struct krb5_ctx *kctx = ctx->internal_ctx_id;
        __u32            rc;

        rc = krb5_encrypt(kctx->kc_keye.kb_tfm, decrypt,
                          NULL, in_buf, out_buf, length);
        if (rc)
                CERROR("plain encrypt error: %d\n", rc);

        return rc;
}

static
int gss_display_kerberos(struct gss_ctx *ctx,
                         char *buf,
                         int bufsize)
{
        struct krb5_ctx *kctx = ctx->internal_ctx_id;
        int              written;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}

static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_plain_encrypt          = gss_plain_encrypt_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};

static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};

/*
 * currently we leave module owner NULL
 */
static struct gss_api_mech gss_kerberos_mech = {
        .gm_owner       = NULL, /*THIS_MODULE, */
        .gm_name        = "krb5",
        /* the 9 bytes below are the DER contents octets of the standard
         * krb5 GSS-API mechanism OID 1.2.840.113554.1.2.2 */
        .gm_oid         = (rawobj_t)
                          {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};

int __init init_kerberos_module(void)
{
        int status;

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}

void __exit cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}