/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * linux/net/sunrpc/gss_krb5_mech.c
 * linux/net/sunrpc/gss_krb5_crypto.c
 * linux/net/sunrpc/gss_krb5_seal.c
 * linux/net/sunrpc/gss_krb5_seqnum.c
 * linux/net/sunrpc/gss_krb5_unseal.c
 *
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * J. Bruce Fields <bfields@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#else
#include <liblustre.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"
static cfs_spinlock_t krb5_seq_lock;

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but for now we simply do CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96",
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96",
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
        },
};

#define MAX_ENCTYPES    sizeof(enctypes)/sizeof(struct krb5_enctype)
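/*
 * For reference, a sketch of what one fully-populated enctypes[] entry
 * presumably looks like, based on the struct krb5_enctype fields above.
 * The tfm names and sizes are assumptions (HMAC-SHA1-96 yields a 12-byte
 * truncated checksum; the confounder matches the 16-byte AES block), not
 * values copied from this file. Illustrative user-space code, excluded
 * from the build:
 */
#if 0
#include <stdio.h>

struct krb5_enctype_example {
        char         *ke_dispname;
        char         *ke_enc_name;      /* linux tfm name */
        char         *ke_hash_name;     /* linux tfm name */
        int           ke_enc_mode;      /* linux tfm mode */
        int           ke_hash_size;     /* checksum size */
        int           ke_conf_size;     /* confounder size */
        unsigned int  ke_hash_hmac:1;   /* is hmac? */
};

/* assumed aes128-cts entry: CBC fallback (see NOTE above) + HMAC-SHA1-96 */
static struct krb5_enctype_example aes128_cts_example = {
        "aes128-cts-hmac-sha1-96",
        "cbc(aes)",             /* CBC with padding until CTS is supported */
        "hmac(sha1)",
        0,
        12,                     /* SHA1-96: checksum truncated to 12 bytes */
        16,                     /* confounder size = AES block size */
        1,                      /* keyed hmac rather than a plain digest */
};

int main(void)
{
        printf("%s: enc=%s hash=%s hash_size=%d conf_size=%d hmac=%u\n",
               aes128_cts_example.ke_dispname,
               aes128_cts_example.ke_enc_name,
               aes128_cts_example.ke_hash_name,
               aes128_cts_example.ke_hash_size,
               aes128_cts_example.ke_conf_size,
               aes128_cts_example.ke_hash_hmac);
        return 0;
}
#endif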
static const char * enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
        if (kb->kb_tfm == NULL) {
                CERROR("failed to alloc tfm: %s, mode %d\n",
                       alg_name, alg_mode);
                return -1;
        }

        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data,
                                       kb->kb_key.len)) {
                CERROR("failed to set %s key, len %d\n",
                       alg_name, kb->kb_key.len);
                return -1;
        }

        return 0;
}
static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* tfm arc4 is stateful, the user must alloc-use-free it on their own */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* tfm hmac is stateful, the user must alloc-use-free it on their own */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}
static
void keyblock_free(struct krb5_keyblock *kb)
{
        rawobj_free(&kb->kb_key);
        if (kb->kb_tfm)
                ll_crypto_free_blkcipher(kb->kb_tfm);
}

static
int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
{
        return rawobj_dup(&new->kb_key, &kb->kb_key);
}
static
int get_bytes(char **ptr, const char *end, void *res, int len)
{
        char *p = *ptr, *q = *ptr + len;

        if (q > end || q < p)
                return -1;

        memcpy(res, p, len);
        *ptr = q;
        return 0;
}

static
int get_rawobj(char **ptr, const char *end, rawobj_t *res)
{
        char *p = *ptr, *q;
        __u32 len;

        if (get_bytes(&p, end, &len, sizeof(len)))
                return -1;

        q = p + len;
        if (q > end || q < p)
                return -1;

        OBD_ALLOC_LARGE(res->data, len);
        if (!res->data)
                return -1;

        res->len = len;
        memcpy(res->data, p, len);
        *ptr = q;
        return 0;
}
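/*
 * To make the encoding concrete: get_rawobj() consumes a native-endian
 * __u32 length followed by that many bytes. A minimal user-space sketch
 * of the same layout (the helper mirrors get_bytes() above; everything
 * else is illustrative). Excluded from the build:
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* user-space clone of get_bytes(): bounds-checked sequential read */
static int demo_get_bytes(char **ptr, const char *end, void *res, int len)
{
        char *p = *ptr, *q = *ptr + len;

        if (q > end || q < p)
                return -1;
        memcpy(res, p, len);
        *ptr = q;
        return 0;
}

int main(void)
{
        /* encode: length (host order, as the kernel code reads it) + data */
        char buf[64], *cur, *end;
        uint32_t len = 5, got_len;
        char data[16] = {0};

        memcpy(buf, &len, sizeof(len));
        memcpy(buf + sizeof(len), "hello", 5);

        /* decode it back the way get_rawobj() does */
        cur = buf;
        end = buf + sizeof(len) + 5;
        if (demo_get_bytes(&cur, end, &got_len, sizeof(got_len)) ||
            got_len >= sizeof(data) ||
            demo_get_bytes(&cur, end, data, got_len))
                return 1;

        printf("len=%u data=%s\n", got_len, data);
        return 0;
}
#endif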
static
int get_keyblock(char **ptr, const char *end,
                 struct krb5_keyblock *kb, __u32 keysize)
{
        char *buf;

        OBD_ALLOC_LARGE(buf, keysize);
        if (buf == NULL)
                return -1;

        if (get_bytes(ptr, end, buf, keysize)) {
                OBD_FREE_LARGE(buf, keysize);
                return -1;
        }

        kb->kb_key.len = keysize;
        kb->kb_key.data = buf;
        return 0;
}
static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        keyblock_free(&kctx->kc_keye);
        keyblock_free(&kctx->kc_keyi);
        keyblock_free(&kctx->kc_keyc);
}
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int tmp_uint, keysize;

        /* seed_init flag */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys are stored as enctype, keysize, key data;
         * we decompose them to fit into the new context */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key must match the enc key's enctype and size */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: checksum key doubles as the integrity key */
        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
/* flags for version 2 contexts */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int tmp_uint, keysize;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
/*
 * The whole purpose here is to keep the user-level gss context parsing
 * from nfs-utils as unchanged as possible: it is not quite mature yet,
 * and many things are still unclear (heimdal, etc.).
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char            *p = (char *) inbuf->data;
        char            *end = (char *) (inbuf->data + inbuf->len);
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only support versions 0, 1 (rfc1964) and 2 (rfc4121) */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (kctx == NULL)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);
                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (knew == NULL)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
        sg->page = virt_to_page(ptr);
        sg->offset = offset_in_page(ptr);
        sg->length = len;
}
static
__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
                   int decrypt,
                   void *iv,
                   void *in,
                   void *out,
                   int length)
{
        struct blkcipher_desc desc;
        struct scatterlist    sg;
        __u8 local_iv[16] = {0};
        __u32 ret = -EINVAL;

        desc.tfm = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
                CERROR("output length %d mismatch blocksize %d\n",
                       length, ll_crypto_blkcipher_blocksize(tfm));
                goto out;
        }

        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n",
                       ll_crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));

        memcpy(out, in, length);
        buf_to_sg(&sg, out, length);

        if (decrypt)
                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
        else
                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);

out:
        return ret;
}
#ifdef HAVE_ASYNC_BLOCK_CIPHER

static inline
int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int                i;

        ll_crypto_hash_setkey(tfm, key->data, key->len);
        desc.tfm = tfm;
        desc.flags = 0;

        ll_crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                ll_crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;
                sg[0].page = iovs[i].kiov_page;
                sg[0].offset = iovs[i].kiov_offset;
                sg[0].length = iovs[i].kiov_len;
                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        return ll_crypto_hash_final(&desc, cksum->data);
}
633 int krb5_digest_hmac(struct ll_crypto_hash *tfm,
635 struct krb5_header *khdr,
636 int msgcnt, rawobj_t *msgs,
637 int iovcnt, lnet_kiov_t *iovs,
640 struct scatterlist sg[1];
641 __u32 keylen = key->len, i;
643 crypto_hmac_init(tfm, key->data, &keylen);
645 for (i = 0; i < msgcnt; i++) {
646 if (msgs[i].len == 0)
648 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
649 crypto_hmac_update(tfm, sg, 1);
652 for (i = 0; i < iovcnt; i++) {
653 if (iovs[i].kiov_len == 0)
655 sg[0].page = iovs[i].kiov_page;
656 sg[0].offset = iovs[i].kiov_offset;
657 sg[0].length = iovs[i].kiov_len;
658 crypto_hmac_update(tfm, sg, 1);
662 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
663 crypto_hmac_update(tfm, sg, 1);
666 crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
670 #endif /* HAVE_ASYNC_BLOCK_CIPHER */
static inline
int krb5_digest_norm(struct ll_crypto_hash *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int                i;

        LASSERT(kb->kb_tfm);
        desc.tfm = tfm;
        desc.flags = 0;

        ll_crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                ll_crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;
                sg[0].page = iovs[i].kiov_page;
                sg[0].offset = iovs[i].kiov_offset;
                sg[0].length = iovs[i].kiov_len;
                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        ll_crypto_hash_final(&desc, cksum->data);

        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
}
/*
 * Compute a (keyed or keyless) checksum over the plain text, with the
 * krb5 wire token header appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct krb5_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct ll_crypto_hash *tfm;
        __u32                  code = GSS_S_FAILURE;
        int                    rc;

        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = ll_crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        if (ke->ke_hash_hmac)
                rc = krb5_digest_hmac(tfm, &kb->kb_key,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = krb5_digest_norm(tfm, kb,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        ll_crypto_free_hash(tfm);
        return code;
}
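/*
 * As a user-space illustration of what the hmac path computes for the
 * wrap case: HMAC over confounder | message | krb5 header, of which the
 * callers below keep only the last ke_hash_size bytes. This sketch
 * assumes OpenSSL and illustrative aes128-cts parameters (HMAC-SHA1,
 * 12-byte truncation); neither is used by this file. Excluded from the
 * build (compile separately with -lcrypto):
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

int main(void)
{
        /* hypothetical key and clear-text pieces */
        unsigned char key[16] = "0123456789abcdef";
        unsigned char confounder[16] = {0}, khdr[16] = {0};
        unsigned char msg[] = "payload";
        unsigned char md[EVP_MAX_MD_SIZE], token_cksum[12];
        unsigned int md_len, i;
        HMAC_CTX *ctx = HMAC_CTX_new();

        /* digest the same concatenation krb5_digest_hmac() walks */
        HMAC_Init_ex(ctx, key, sizeof(key), EVP_sha1(), NULL);
        HMAC_Update(ctx, confounder, sizeof(confounder));
        HMAC_Update(ctx, msg, sizeof(msg) - 1);
        HMAC_Update(ctx, khdr, sizeof(khdr));
        HMAC_Final(ctx, md, &md_len);
        HMAC_CTX_free(ctx);

        /* keep the last ke_hash_size (here 12) bytes, as the memcpy
         * calls in gss_get_mic_kerberos()/gss_wrap_kerberos() do */
        memcpy(token_cksum, md + md_len - sizeof(token_cksum),
               sizeof(token_cksum));

        for (i = 0; i < sizeof(token_cksum); i++)
                printf("%02x", token_cksum[i]);
        printf("\n");
        return 0;
}
#endif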
static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        cfs_spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        cfs_spin_unlock(&krb5_seq_lock);
}
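/*
 * For orientation, the 16-byte wire header fill_krb5_header() emits can
 * be reproduced in user space as below. The numeric token id (0x0504)
 * and flag values are assumptions taken from RFC 4121, not from this
 * file. Excluded from the build:
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* htons */
#include <endian.h>             /* htobe64 (glibc) */

/* mirrors struct krb5_header: 16 bytes, no padding */
struct krb5_header_wire {
        uint16_t kh_tok_id;     /* token id, big endian */
        uint8_t  kh_flags;      /* acceptor/confidential/subkey flags */
        uint8_t  kh_filler;     /* always 0xff */
        uint16_t kh_ec;         /* extra count */
        uint16_t kh_rrc;        /* rotate count */
        uint64_t kh_seq;        /* sequence number, big endian */
};

int main(void)
{
        struct krb5_header_wire khdr;
        uint64_t seq_send = 42;         /* hypothetical sequence number */
        unsigned char buf[16];
        int i;

        /* privacy (wrap) token sent by the initiator */
        khdr.kh_tok_id = htons(0x0504); /* assumed KG_TOK_WRAP_MSG */
        khdr.kh_flags  = 0x02;          /* assumed FLAG_WRAP_CONFIDENTIAL */
        khdr.kh_filler = 0xff;
        khdr.kh_ec     = htons(0);
        khdr.kh_rrc    = htons(0);
        khdr.kh_seq    = htobe64(seq_send++);

        memcpy(buf, &khdr, sizeof(buf));
        for (i = 0; i < 16; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}
#endif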
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
static
int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
{
        int padding;

        padding = (blocksize - (msg->len & (blocksize - 1))) &
                  (blocksize - 1);
        if (!padding)
                return 0;

        if (msg->len + padding > msg_buflen) {
                CERROR("bufsize %u too small: datalen %u, padding %u\n",
                       msg_buflen, msg->len, padding);
                return -EINVAL;
        }

        memset(msg->data + msg->len, padding, padding);
        msg->len += padding;
        return 0;
}
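/*
 * The padding arithmetic above only works for power-of-two block sizes,
 * and the pad byte value equals the pad length (memset fills `padding`
 * bytes of value `padding`). A quick user-space check of the formula,
 * assuming the 16-byte AES block size. Excluded from the build:
 */
#if 0
#include <stdio.h>

/* same round-up arithmetic as add_padding() */
static int padding_for(unsigned int len, unsigned int blocksize)
{
        return (blocksize - (len & (blocksize - 1))) & (blocksize - 1);
}

int main(void)
{
        unsigned int lens[] = { 0, 1, 15, 16, 17, 100 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
                printf("len %3u -> pad %2d (blocksize 16)\n",
                       lens[i], padding_for(lens[i], 16));
        return 0;
}
#endif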
static
int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
                         int mode_ecb,
                         int inobj_cnt,
                         rawobj_t *inobjs,
                         rawobj_t *outobj,
                         int enc)
{
        struct blkcipher_desc desc;
        struct scatterlist    src, dst;
        __u8                  local_iv[16] = {0}, *buf;
        __u32                 datalen = 0;
        int                   i, rc;

        buf = outobj->data;
        desc.tfm = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        for (i = 0; i < inobj_cnt; i++) {
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
                buf_to_sg(&dst, buf, outobj->len - datalen);

                if (mode_ecb) {
                        if (enc)
                                rc = ll_crypto_blkcipher_encrypt(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = ll_crypto_blkcipher_decrypt(
                                        &desc, &dst, &src, src.length);
                } else {
                        if (enc)
                                rc = ll_crypto_blkcipher_encrypt_iv(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = ll_crypto_blkcipher_decrypt_iv(
                                        &desc, &dst, &src, src.length);
                }

                if (rc) {
                        CERROR("encrypt error %d\n", rc);
                        return rc;
                }

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        outobj->len = datalen;
        return 0;
}
/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc ciph_desc;
        __u8                  local_iv[16] = {0};
        struct scatterlist    src, dst;
        int                   blocksize, i, rc, nob = 0;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        buf_to_sg(&src, confounder, blocksize);
        buf_to_sg(&dst, cipher->data, blocksize);

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error encrypting confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                src.page = desc->bd_iov[i].kiov_page;
                src.offset = desc->bd_iov[i].kiov_offset;
                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
                             (~(blocksize - 1));
                nob += src.length;

                dst.page = desc->bd_enc_iov[i].kiov_page;
                dst.offset = src.offset;
                dst.length = src.length;

                desc->bd_enc_iov[i].kiov_offset = dst.offset;
                desc->bd_enc_iov[i].kiov_len = dst.length;

                rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error encrypting page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        buf_to_sg(&src, khdr, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error encrypting krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual data size:
 * - for client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we adjust it according to bd_enc_iov[]->kiov_len.
 *   this means we DO NOT support the server sending odd-sized data in any
 *   page other than the last one.
 * - for server write: we know the exact data size of each page in advance,
 *   so kiov_len is already accurate and must not be adjusted at all; and
 *   bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len), which
 *   should have been done by prep_bulk().
 */
static
int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc ciph_desc;
        __u8                  local_iv[16] = {0};
        struct scatterlist    src, dst;
        int                   ct_nob = 0, pt_nob = 0;
        int                   blocksize, i, rc;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(desc->bd_nob_transferred);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        buf_to_sg(&src, cipher->data, blocksize);
        buf_to_sg(&dst, plain->data, blocksize);

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error decrypting confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_iov[i].kiov_offset,
                               desc->bd_enc_iov[i].kiov_len, blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob_transferred)
                                desc->bd_enc_iov[i].kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
                        if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
                                desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(desc->bd_iov[i].kiov_len <=
                                desc->bd_enc_iov[i].kiov_len);
                }

                if (desc->bd_enc_iov[i].kiov_len == 0)
                        continue;

                src.page = desc->bd_enc_iov[i].kiov_page;
                src.offset = desc->bd_enc_iov[i].kiov_offset;
                src.length = desc->bd_enc_iov[i].kiov_len;

                dst = src;
                if (desc->bd_iov[i].kiov_len % blocksize == 0)
                        dst.page = desc->bd_iov[i].kiov_page;

                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error decrypting page: %d\n", rc);
                        return rc;
                }

                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
                        memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               desc->bd_iov[i].kiov_len);
                }

                ct_nob += desc->bd_enc_iov[i].kiov_len;
                pt_nob += desc->bd_iov[i].kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear up the rest of the unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_iov[i++].kiov_len = 0;

        /* decrypt tail (krb5 header) */
        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error decrypting tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}
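/*
 * A simplified user-space model of the adj_nob accounting above: per-page
 * lengths are trimmed so the totals match the bytes actually transferred
 * (cipher) and expected (plain). All sizes are hypothetical. Excluded
 * from the build:
 */
#if 0
#include <stdio.h>

int main(void)
{
        int enc_len[4] = { 4096, 4096, 4096, 4096 };    /* bd_enc_iov[].kiov_len */
        int plain_len[4];                               /* bd_iov[].kiov_len */
        int nob_transferred = 10240;                    /* cipher bytes received */
        int nob = 10000;                                /* plain bytes expected */
        int ct_nob = 0, pt_nob = 0, i;

        for (i = 0; i < 4 && ct_nob < nob_transferred; i++) {
                if (ct_nob + enc_len[i] > nob_transferred)      /* short last page */
                        enc_len[i] = nob_transferred - ct_nob;

                plain_len[i] = enc_len[i];
                if (pt_nob + enc_len[i] > nob)                  /* strip padding */
                        plain_len[i] = nob - pt_nob;

                ct_nob += enc_len[i];
                pt_nob += plain_len[i];
                printf("page %d: cipher %d plain %d\n",
                       i, enc_len[i], plain_len[i]);
        }
        return 0;
}
#endif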
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct ll_crypto_cipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (arc4_tfm == NULL) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          3, data_desc, &cipher, 1);
arc4_out_tfm:
                ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while(0); /* just to avoid compile warning */
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          3, data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
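/*
 * Putting the wrap layout together: the token is
 * | krb5 header | E(confounder | padded msg | krb5 header) | checksum |,
 * so with CBC the cipher text length is conf + round_up(msg) + 16. The
 * sizes below are the assumed aes128-cts parameters used throughout
 * these examples. Excluded from the build:
 */
#if 0
#include <stdio.h>

int main(void)
{
        int khdr_size = 16;     /* sizeof(struct krb5_header) */
        int conf_size = 16;     /* ke_conf_size */
        int hash_size = 12;     /* ke_hash_size (SHA1-96) */
        int blocksize = 16;     /* cbc(aes) block size */
        int msg_len   = 1000;   /* hypothetical plain text length */

        /* add_padding() rounds the message up to the block size */
        int padded = (msg_len + blocksize - 1) & ~(blocksize - 1);

        /* cipher text covers confounder | padded msg | krb5 header */
        int cipher_len = conf_size + padded + khdr_size;

        printf("token len = %d\n", khdr_size + cipher_len + hash_size);
        return 0;
}
#endif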
static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        int              blocksize, i;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_iov[i].kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.
                 */
                if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               desc->bd_iov[i].kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
                                                blocksize - 1) &
                                               (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         * which lands in the token as head/tail cipher text around the
         * encrypted pages:
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();         /* arcfour-hmac is not supported for bulk */
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
static
__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
                          rawobj_t *gsshdr,
                          rawobj_t *token,
                          rawobj_t *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n",
                       msg->len, bodysize - ke->ke_conf_size);
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *) (khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct ll_crypto_cipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (arc4_tfm == NULL) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (ll_crypto_blkcipher_setkey(arc4_tfm,
                                               arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
                ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                /* cksum was pointing into the token, don't free it */
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          1, &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("error decrypting\n");
                goto out_free;
        }

        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify krb5 header in token is not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
int gss_display_kerberos(struct gss_ctx *ctx,
                         char *buf,
                         int bufsize)
{
        struct krb5_ctx *kctx = ctx->internal_ctx_id;
        int              written;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_service     = SPTLRPC_SVC_NULL,
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_service     = SPTLRPC_SVC_AUTH,
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_service     = SPTLRPC_SVC_INTG,
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_service     = SPTLRPC_SVC_PRIV,
        },
};
/*
 * currently we leave the module owner NULL
 */
static struct gss_api_mech gss_kerberos_mech = {
        .gm_owner       = NULL, /*THIS_MODULE, */
        .gm_oid         = (rawobj_t)
                          {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sfs         = gss_kerberos_sfs,
};
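/*
 * The gm_oid bytes are the DER-encoded Kerberos 5 mechanism OID,
 * 1.2.840.113554.1.2.2. A small user-space decoder demonstrating the
 * base-128 arc encoding (illustrative only). Excluded from the build:
 */
#if 0
#include <stdio.h>
#include <stddef.h>

int main(void)
{
        /* "\052\206\110\206\367\022\001\002\002" from gm_oid above */
        const unsigned char oid[] = { 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12,
                                      0x01, 0x02, 0x02 };
        unsigned long arc = 0;
        size_t i;

        /* first octet packs the first two arcs: 40 * X + Y */
        printf("%u.%u", oid[0] / 40, oid[0] % 40);

        for (i = 1; i < sizeof(oid); i++) {
                arc = (arc << 7) | (oid[i] & 0x7f);
                if (!(oid[i] & 0x80)) {         /* high bit clear ends an arc */
                        printf(".%lu", arc);
                        arc = 0;
                }
        }
        printf("\n");                           /* 1.2.840.113554.1.2.2 */
        return 0;
}
#endif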
int __init init_kerberos_module(void)
{
        int status;

        cfs_spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}

void __exit cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}