 * Modifications for Lustre
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Whamcloud, Inc.
 * Author: Eric Mei <ericm@clusterfs.com>
 * linux/net/sunrpc/gss_krb5_mech.c
 * linux/net/sunrpc/gss_krb5_crypto.c
 * linux/net/sunrpc/gss_krb5_seal.c
 * linux/net/sunrpc/gss_krb5_seqnum.c
 * linux/net/sunrpc/gss_krb5_unseal.c
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 * Andy Adamson <andros@umich.edu>
 * J. Bruce Fields <bfields@umich.edu>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define DEBUG_SUBSYSTEM S_SEC

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <liblustre.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"
static spinlock_t krb5_seq_lock;

char *ke_enc_name;            /* linux tfm name */
char *ke_hash_name;           /* linux tfm name */
int ke_enc_mode;              /* linux tfm mode */
int ke_hash_size;             /* checksum size */
int ke_conf_size;             /* confounder size */
unsigned int ke_hash_hmac:1;  /* is hmac? */
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but currently we simply do CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
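/*
 * (Added illustrative note, not from the original code) the practical
 * difference is message expansion on the wire.  CTS keeps the cipher text
 * the same length as the plain text, while CBC-with-padding first rounds
 * the plain text up to whole blocks; e.g. with a 16-byte AES block:
 *
 *   21 bytes of plain text -> 21 bytes of CTS cipher text
 *   21 bytes of plain text -> 32 bytes of CBC cipher text (11 pad bytes)
 *
 * so interoperating with a real CTS peer would require stripping that
 * padding again; see add_padding() below for how the pad length is chosen.
 */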
static struct krb5_enctype enctypes[] = {
[ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */
[ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
[ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
"aes128-cts-hmac-sha1-96",
[ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
"aes256-cts-hmac-sha1-96",
[ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */

#define MAX_ENCTYPES (sizeof(enctypes) / sizeof(struct krb5_enctype))
static const char *enctype2str(__u32 enctype)
if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
return enctypes[enctype].ke_dispname;
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
if (IS_ERR(kb->kb_tfm)) {
CERROR("failed to alloc tfm: %s, mode %d\n",
if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
alg_name, kb->kb_key.len);
int krb5_init_keys(struct krb5_ctx *kctx)
struct krb5_enctype *ke;
if (kctx->kc_enctype >= MAX_ENCTYPES ||
enctypes[kctx->kc_enctype].ke_hash_size == 0) {
CERROR("unsupported enctype %x\n", kctx->kc_enctype);
ke = &enctypes[kctx->kc_enctype];
/* the arc4 tfm is stateful; users should allocate, use and free it on their own */
if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
/* the hmac tfm is stateful; users should allocate, use and free it on their own */
if (ke->ke_hash_hmac == 0 &&
keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
if (ke->ke_hash_hmac == 0 &&
keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
void keyblock_free(struct krb5_keyblock *kb)
rawobj_free(&kb->kb_key);
ll_crypto_free_blkcipher(kb->kb_tfm);

int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
return rawobj_dup(&new->kb_key, &kb->kb_key);

int get_bytes(char **ptr, const char *end, void *res, int len)
if (q > end || q < p)

int get_rawobj(char **ptr, const char *end, rawobj_t *res)
if (get_bytes(&p, end, &len, sizeof(len)))
if (q > end || q < p)
OBD_ALLOC_LARGE(res->data, len);
memcpy(res->data, p, len);
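/*
 * (Added note for clarity) the context blob handed down from user space is
 * consumed by these helpers as a simple packed stream: get_bytes() copies a
 * fixed-size field and advances the cursor, get_rawobj() reads a 32-bit
 * length followed by that many bytes of data, and get_keyblock() below reads
 * a raw key whose length the caller already knows.  All of them bounds-check
 * against 'end', and the "q < p" test catches pointer wrap-around caused by
 * a corrupt length field.
 */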
int get_keyblock(char **ptr, const char *end,
struct krb5_keyblock *kb, __u32 keysize)
OBD_ALLOC_LARGE(buf, keysize);
if (get_bytes(ptr, end, buf, keysize)) {
OBD_FREE_LARGE(buf, keysize);
kb->kb_key.len = keysize;
kb->kb_key.data = buf;

void delete_context_kerberos(struct krb5_ctx *kctx)
rawobj_free(&kctx->kc_mech_used);
keyblock_free(&kctx->kc_keye);
keyblock_free(&kctx->kc_keyi);
keyblock_free(&kctx->kc_keyc);
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
unsigned int tmp_uint, keysize;
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
kctx->kc_seed_init = (tmp_uint != 0);
if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
/* sign/seal algorithm, not really used now */
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
kctx->kc_seq_send = tmp_uint;
if (get_rawobj(&p, end, &kctx->kc_mech_used))
/* old style enc/seq keys in format:
 * we decompose them to fit into the new context
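/*
 * (Added note) as parsed below, the rest of an rfc1964-style blob carries a
 * 32-bit enctype, a 32-bit key length and the enc key bytes, then a second
 * enctype (checked against the first) and a second key holding the
 * seq/checksum key; the integrity key kc_keyi is afterwards duplicated from
 * kc_keyc via keyblock_dup() as an old-style fallback.
 */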
if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
if (get_bytes(&p, end, &keysize, sizeof(keysize)))
if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
tmp_uint != kctx->kc_enctype)
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
/* old style fallback */
if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
return GSS_S_FAILURE;
/* Flag bits for the version 2 context */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
unsigned int tmp_uint, keysize;
if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
kctx->kc_initiate = 1;
if (tmp_uint & KRB5_CTX_FLAG_CFX)
if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
kctx->kc_have_acceptor_subkey = 1;
if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
/* size of each key */
if (get_bytes(&p, end, &keysize, sizeof(keysize)))
/* number of keys - should always be 3 */
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
CERROR("Invalid number of keys: %u\n", tmp_uint);
if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
CDEBUG(D_SEC, "successfully imported v2 context\n");
return GSS_S_FAILURE;
 * The whole purpose here is to keep the user-level gss context parsing from
 * nfs-utils as unchanged as we can; it is not quite mature yet, and much is
 * still unclear, e.g. heimdal support.
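/*
 * (Added note) import flow, as implemented below: the first 32-bit word of
 * the inbuf selects the blob layout -- values 0 and 1 are treated as the old
 * rfc1964-style context (and double as the initiate flag), anything else is
 * parsed as the v2/rfc4121-style context; in both cases krb5_init_keys() is
 * then called to instantiate the kernel crypto tfms for the imported keys.
 */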
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
struct gss_ctx *gctx)
struct krb5_ctx *kctx;
char *p = (char *) inbuf->data;
char *end = (char *) (inbuf->data + inbuf->len);
unsigned int tmp_uint, rc;
if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
CERROR("Failed to read version\n");
return GSS_S_FAILURE;
/* only support 0, 1 for the moment */
CERROR("Invalid version %u\n", tmp_uint);
return GSS_S_FAILURE;
return GSS_S_FAILURE;
if (tmp_uint == 0 || tmp_uint == 1) {
kctx->kc_initiate = tmp_uint;
rc = import_context_rfc1964(kctx, p, end);
rc = import_context_rfc4121(kctx, p, end);
rc = krb5_init_keys(kctx);
delete_context_kerberos(kctx);
return GSS_S_FAILURE;
gctx->internal_ctx_id = kctx;
return GSS_S_COMPLETE;
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
struct gss_ctx *gctx_new)
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_ctx *knew;
return GSS_S_FAILURE;
knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
knew->kc_cfx = kctx->kc_cfx;
knew->kc_seed_init = kctx->kc_seed_init;
knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
knew->kc_endtime = kctx->kc_endtime;
memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
knew->kc_seq_send = kctx->kc_seq_recv;
knew->kc_seq_recv = kctx->kc_seq_send;
knew->kc_enctype = kctx->kc_enctype;
if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
if (krb5_init_keys(knew))
gctx_new->internal_ctx_id = knew;
CDEBUG(D_SEC, "successfully copied reverse context\n");
return GSS_S_COMPLETE;
delete_context_kerberos(knew);
return GSS_S_FAILURE;
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
unsigned long *endtime)
struct krb5_ctx *kctx = gctx->internal_ctx_id;
*endtime = (unsigned long) ((__u32) kctx->kc_endtime);
return GSS_S_COMPLETE;

void gss_delete_sec_context_kerberos(void *internal_ctx)
struct krb5_ctx *kctx = internal_ctx;
delete_context_kerberos(kctx);

void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
sg->page = virt_to_page(ptr);
sg->offset = offset_in_page(ptr);
__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
struct blkcipher_desc desc;
struct scatterlist sg;
__u8 local_iv[16] = {0};
desc.info = local_iv;
if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
CERROR("output length %d doesn't match blocksize %d\n",
length, ll_crypto_blkcipher_blocksize(tfm));
if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
buf_to_sg(&sg, out, length);
ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
#ifdef HAVE_ASYNC_BLOCK_CIPHER

int krb5_digest_hmac(struct ll_crypto_hash *tfm,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
struct hash_desc desc;
struct scatterlist sg[1];
ll_crypto_hash_setkey(tfm, key->data, key->len);
ll_crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
ll_crypto_hash_update(&desc, sg, msgs[i].len);
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
sg[0].page = iovs[i].kiov_page;
sg[0].offset = iovs[i].kiov_offset;
sg[0].length = iovs[i].kiov_len;
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
return ll_crypto_hash_final(&desc, cksum->data);

#else /* ! HAVE_ASYNC_BLOCK_CIPHER */

int krb5_digest_hmac(struct ll_crypto_hash *tfm,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
struct scatterlist sg[1];
__u32 keylen = key->len, i;
crypto_hmac_init(tfm, key->data, &keylen);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
crypto_hmac_update(tfm, sg, 1);
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
sg[0].page = iovs[i].kiov_page;
sg[0].offset = iovs[i].kiov_offset;
sg[0].length = iovs[i].kiov_len;
crypto_hmac_update(tfm, sg, 1);
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
crypto_hmac_update(tfm, sg, 1);
crypto_hmac_final(tfm, key->data, &keylen, cksum->data);

#endif /* HAVE_ASYNC_BLOCK_CIPHER */
int krb5_digest_norm(struct ll_crypto_hash *tfm,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
struct hash_desc desc;
struct scatterlist sg[1];
ll_crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
ll_crypto_hash_update(&desc, sg, msgs[i].len);
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
sg[0].page = iovs[i].kiov_page;
sg[0].offset = iovs[i].kiov_offset;
sg[0].length = iovs[i].kiov_len;
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
ll_crypto_hash_final(&desc, cksum->data);
return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
cksum->data, cksum->len);
 * Compute a (keyed or keyless) checksum over the plain text with the
 * krb5 wire token header appended.
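/*
 * (Added illustration, mirroring the MIC callers further below) a typical
 * use hands in the message fragments plus the token header and gets back a
 * freshly allocated digest:
 *
 *   rawobj_t cksum = RAWOBJ_EMPTY;
 *
 *   if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
 *                          khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
 *           return GSS_S_FAILURE;
 *   ... use cksum.data / cksum.len, then rawobj_free(&cksum) ...
 */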
__s32 krb5_make_checksum(__u32 enctype,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
struct krb5_enctype *ke = &enctypes[enctype];
struct ll_crypto_hash *tfm;
__u32 code = GSS_S_FAILURE;
if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
cksum->len = ll_crypto_hash_digestsize(tfm);
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (ke->ke_hash_hmac)
rc = krb5_digest_hmac(tfm, &kb->kb_key,
khdr, msgcnt, msgs, iovcnt, iovs, cksum);
rc = krb5_digest_norm(tfm, kb,
khdr, msgcnt, msgs, iovcnt, iovs, cksum);
code = GSS_S_COMPLETE;
ll_crypto_free_hash(tfm);
static void fill_krb5_header(struct krb5_ctx *kctx,
struct krb5_header *khdr,
unsigned char acceptor_flag;
acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
khdr->kh_ec = cpu_to_be16(0);
khdr->kh_rrc = cpu_to_be16(0);
khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
khdr->kh_flags = acceptor_flag;
khdr->kh_ec = cpu_to_be16(0xffff);
khdr->kh_rrc = cpu_to_be16(0xffff);
khdr->kh_filler = 0xff;
spin_lock(&krb5_seq_lock);
khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
spin_unlock(&krb5_seq_lock);
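/*
 * (Added note) the header filled above follows the 16-byte RFC 4121 CFX
 * token header layout: a 16-bit token id (wrap or MIC), an 8-bit flags byte
 * (sender-is-acceptor / confidential), an 0xff filler byte, 16-bit EC and
 * RRC fields (zero for wrap tokens here, 0xffff -- i.e. more filler -- for
 * MIC tokens), and a 64-bit big-endian send sequence number whose increment
 * is serialized by krb5_seq_lock.
 */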
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
struct krb5_header *khdr,
unsigned char acceptor_flag;
__u16 tok_id, ec_rrc;
acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
tok_id = KG_TOK_WRAP_MSG;
tok_id = KG_TOK_MIC_MSG;
if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
CERROR("bad token id\n");
return GSS_S_DEFECTIVE_TOKEN;
if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
CERROR("bad direction flag\n");
return GSS_S_BAD_SIG;
if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
CERROR("missing confidential flag\n");
return GSS_S_BAD_SIG;
if (khdr->kh_filler != 0xff) {
CERROR("bad filler\n");
return GSS_S_DEFECTIVE_TOKEN;
if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
CERROR("bad EC or RRC\n");
return GSS_S_DEFECTIVE_TOKEN;
return GSS_S_COMPLETE;
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
rawobj_t cksum = RAWOBJ_EMPTY;
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
fill_krb5_header(kctx, khdr, 0);
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
token->len = sizeof(*khdr) + ke->ke_hash_size;
return GSS_S_COMPLETE;
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
rawobj_t cksum = RAWOBJ_EMPTY;
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
khdr = (struct krb5_header *) token->data;
major = verify_krb5_header(kctx, khdr, 0);
if (major != GSS_S_COMPLETE) {
CERROR("bad krb5 header\n");
if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
CERROR("short signature: %u, require %d\n",
token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
return GSS_S_FAILURE;
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
CERROR("failed to make checksum\n");
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
CERROR("checksum mismatch\n");
return GSS_S_BAD_SIG;
return GSS_S_COMPLETE;
int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
padding = (blocksize - (msg->len & (blocksize - 1))) &
if (msg->len + padding > msg_buflen) {
CERROR("bufsize %u too small: datalen %u, padding %u\n",
msg_buflen, msg->len, padding);
memset(msg->data + msg->len, padding, padding);
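/*
 * (Added worked example) with blocksize 16 and msg->len == 21:
 *   21 & 15 = 5, (16 - 5) & 15 = 11,
 * so 11 bytes each holding the value 11 (0x0b) are appended and the message
 * grows to 32 bytes; a message that is already block-aligned gets no padding
 * at all, because the computed count reduces to zero once masked.
 */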
int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
struct blkcipher_desc desc;
struct scatterlist src, dst;
__u8 local_iv[16] = {0}, *buf;
desc.info = local_iv;
for (i = 0; i < inobj_cnt; i++) {
LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
buf_to_sg(&dst, buf, outobj->len - datalen);
rc = ll_crypto_blkcipher_encrypt(
&desc, &dst, &src, src.length);
rc = ll_crypto_blkcipher_decrypt(
&desc, &dst, &src, src.length);
rc = ll_crypto_blkcipher_encrypt_iv(
&desc, &dst, &src, src.length);
rc = ll_crypto_blkcipher_decrypt_iv(
&desc, &dst, &src, src.length);
CERROR("encrypt error %d\n", rc);
datalen += inobjs[i].len;
buf += inobjs[i].len;
outobj->len = datalen;
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
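/*
 * (Added worked example, assuming a 16-byte block cipher) a 1000-byte page
 * is encrypted as round_up(1000, 16) = 1008 bytes of cipher text, so cipher
 * text can only grow; the nob accumulator in krb5_encrypt_bulk() below
 * exists so that, when adj_nob is set, desc->bd_nob can be made to reflect
 * these rounded-up per-page sizes rather than the plain text sizes.
 */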
int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
struct krb5_header *khdr,
struct ptlrpc_bulk_desc *desc,
struct blkcipher_desc ciph_desc;
__u8 local_iv[16] = {0};
struct scatterlist src, dst;
int blocksize, i, rc, nob = 0;
LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_iov);
blocksize = ll_crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
ciph_desc.tfm = tfm;
ciph_desc.info = local_iv;
ciph_desc.flags = 0;
/* encrypt confounder */
buf_to_sg(&src, confounder, blocksize);
buf_to_sg(&dst, cipher->data, blocksize);
rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
CERROR("error encrypting confounder: %d\n", rc);
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
src.page = desc->bd_iov[i].kiov_page;
src.offset = desc->bd_iov[i].kiov_offset;
src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
dst.page = desc->bd_enc_iov[i].kiov_page;
dst.offset = src.offset;
dst.length = src.length;
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
CERROR("error encrypting page: %d\n", rc);
/* encrypt krb5 header */
buf_to_sg(&src, khdr, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
CERROR("error encrypting krb5 header: %d\n", rc);
 * desc->bd_nob_transferred is the size of cipher text received.
 * desc->bd_nob is the target size of plain text supposed to be.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual data size:
 * - for client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_enc_iov[]->kiov_len. this means we do NOT support the situation
 *   where the server sends odd-sized data in a page which is not the last
 *   one.
 * - for server write: we know exactly the data size expected for each page,
 *   thus kiov_len is accurate already, so we should not adjust it at all.
 *   and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which
 *   should have been done by prep_bulk().
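/*
 * (Added worked example, hypothetical sizes) say two 4096-byte pages carry
 * 4096 + 1909 bytes of plain text and a 16-byte block cipher is in use: the
 * second page travels as round_up(1909, 16) = 1920 bytes of cipher text,
 * giving bd_nob_transferred = 6016.  The loop below then clamps
 * bd_enc_iov[1].kiov_len to the 1920 bytes actually transferred and, with
 * adj_nob set, bd_iov[1].kiov_len down to the 1909 bytes of real data, so
 * ct_nob ends at 6016 and pt_nob at 6005.
 */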
int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
struct krb5_header *khdr,
struct ptlrpc_bulk_desc *desc,
struct blkcipher_desc ciph_desc;
__u8 local_iv[16] = {0};
struct scatterlist src, dst;
int ct_nob = 0, pt_nob = 0;
int blocksize, i, rc;
LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_iov);
LASSERT(desc->bd_nob_transferred);
blocksize = ll_crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
ciph_desc.tfm = tfm;
ciph_desc.info = local_iv;
ciph_desc.flags = 0;
if (desc->bd_nob_transferred % blocksize) {
CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
/* decrypt head (confounder) */
buf_to_sg(&src, cipher->data, blocksize);
buf_to_sg(&dst, plain->data, blocksize);
rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
CERROR("error decrypting confounder: %d\n", rc);
for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
CERROR("page %d: odd offset %u len %u, blocksize %d\n",
i, desc->bd_enc_iov[i].kiov_offset,
desc->bd_enc_iov[i].kiov_len, blocksize);
if (ct_nob + desc->bd_enc_iov[i].kiov_len >
desc->bd_nob_transferred)
desc->bd_enc_iov[i].kiov_len =
desc->bd_nob_transferred - ct_nob;
desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
/* this should be guaranteed by LNET */
LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
desc->bd_nob_transferred);
LASSERT(desc->bd_iov[i].kiov_len <=
desc->bd_enc_iov[i].kiov_len);
if (desc->bd_enc_iov[i].kiov_len == 0)
src.page = desc->bd_enc_iov[i].kiov_page;
src.offset = desc->bd_enc_iov[i].kiov_offset;
src.length = desc->bd_enc_iov[i].kiov_len;
if (desc->bd_iov[i].kiov_len % blocksize == 0)
dst.page = desc->bd_iov[i].kiov_page;
rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
CERROR("error decrypting page: %d\n", rc);
if (desc->bd_iov[i].kiov_len % blocksize != 0) {
memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
desc->bd_iov[i].kiov_offset,
cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
desc->bd_iov[i].kiov_offset,
desc->bd_iov[i].kiov_len);
ct_nob += desc->bd_enc_iov[i].kiov_len;
pt_nob += desc->bd_iov[i].kiov_len;
if (unlikely(ct_nob != desc->bd_nob_transferred)) {
CERROR("%d cipher text transferred but only %d decrypted\n",
desc->bd_nob_transferred, ct_nob);
if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
CERROR("%d plain text expected but only %d received\n",
desc->bd_nob, pt_nob);
/* if needed, clear the remaining unused iovs */
while (i < desc->bd_iov_count)
desc->bd_iov[i++].kiov_len = 0;
/* decrypt tail (krb5 header) */
buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
CERROR("error decrypting tail: %d\n", rc);
if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
CERROR("krb5 header doesn't match\n");
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t data_desc[3], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
 * final token format:
 * ---------------------------------------------------
 * | krb5 header | cipher text | checksum (16 bytes) |
 * ---------------------------------------------------
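/*
 * (Added worked example, assuming aes128-cts: 16-byte blocks, 16-byte
 * confounder, 12-byte sha1-96 checksum) wrapping a 100-byte message costs
 *   16 (krb5 header) + 16 (confounder) + pad(100 -> 112)
 *   + 16 (encrypted krb5 header) + 12 (checksum) = 172 bytes of token,
 * which is the sizeof(*khdr) + cipher.len + ke->ke_hash_size computed at
 * the end of this function.
 */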
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
cfs_get_random_bytes(conf, ke->ke_conf_size);
/* get encryption blocksize. note kc_keye might not be associated with
 * a tfm; currently that is only the case for arcfour-hmac */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
LASSERT(kctx->kc_keye.kb_tfm);
blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
LASSERT(blocksize <= ke->ke_conf_size);
/* pad the message */
if (add_padding(msg, msg_buflen, blocksize))
return GSS_S_FAILURE;
 * clear text layout for checksum:
 * ------------------------------------------------------
 * | confounder | gss header | clear msgs | krb5 header |
 * ------------------------------------------------------
data_desc[0].data = conf;
data_desc[0].len = ke->ke_conf_size;
data_desc[1].data = gsshdr->data;
data_desc[1].len = gsshdr->len;
data_desc[2].data = msg->data;
data_desc[2].len = msg->len;
/* compute checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 3, data_desc, 0, NULL, &cksum))
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
 * clear text layout for encryption:
 * -----------------------------------------
 * | confounder | clear msgs | krb5 header |
 * -----------------------------------------
data_desc[0].data = conf;
data_desc[0].len = ke->ke_conf_size;
data_desc[1].data = msg->data;
data_desc[1].len = msg->len;
data_desc[2].data = (__u8 *) khdr;
data_desc[2].len = sizeof(*khdr);
/* cipher text will be written directly in place */
cipher.data = (__u8 *) (khdr + 1);
cipher.len = token->len - sizeof(*khdr);
LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
struct ll_crypto_cipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
CERROR("failed to obtain arc4 enc key\n");
GOTO(arc4_out, rc = -EACCES);
arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
CERROR("failed to set arc4 key, len %d\n",
GOTO(arc4_out_tfm, rc = -EACCES);
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
3, data_desc, &cipher, 1);
ll_crypto_free_blkcipher(arc4_tfm);
rawobj_free(&arc4_keye);
do {} while (0); /* just to avoid a compile warning */
rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
3, data_desc, &cipher, 1);
rawobj_free(&cksum);
return GSS_S_FAILURE;
/* fill in checksum */
LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
memcpy((char *)(khdr + 1) + cipher.len,
cksum.data + cksum.len - ke->ke_hash_size,
rawobj_free(&cksum);
/* final token length */
token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
return GSS_S_COMPLETE;
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
struct ptlrpc_bulk_desc *desc)
struct krb5_ctx *kctx = gctx->internal_ctx_id;
LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_iov);
LASSERT(kctx->kc_keye.kb_tfm);
blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(desc->bd_enc_iov[i].kiov_page);
 * the offset should always start at a page boundary on both the
 * client and server side.
if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
CERROR("odd offset %d in page %d\n",
desc->bd_iov[i].kiov_offset, i);
return GSS_S_FAILURE;
desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
blocksize - 1) & (~(blocksize - 1));
return GSS_S_COMPLETE;
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
struct ptlrpc_bulk_desc *desc,
rawobj_t *token, int adj_nob)
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t data_desc[1], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
 * final token format:
 * --------------------------------------------------
 * | krb5 header | head/tail cipher text | checksum |
 * --------------------------------------------------
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
cfs_get_random_bytes(conf, ke->ke_conf_size);
/* get encryption blocksize. note kc_keye might not be associated with
 * a tfm; currently that is only the case for arcfour-hmac */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
LASSERT(kctx->kc_keye.kb_tfm);
blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 * we assume the size of krb5_header (16 bytes) is a multiple of blocksize.
 * the bulk token size would be exactly (sizeof(krb5_header) +
 * blocksize + sizeof(krb5_header) + hashsize)
LASSERT(blocksize <= ke->ke_conf_size);
LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
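/*
 * (Added worked example, assuming a 16-byte AES block and a 12-byte sha1-96
 * checksum) the bulk token is then exactly
 *   16 (krb5 header) + 16 (encrypted confounder)
 *   + 16 (encrypted krb5 header) + 12 (checksum) = 60 bytes;
 * the LASSERT above appears to use 16 as a conservative upper bound for the
 * checksum size, hence the ">= ... + 16".
 */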
 * clear text layout for checksum:
 * ------------------------------------------
 * | confounder | clear pages | krb5 header |
 * ------------------------------------------
data_desc[0].data = conf;
data_desc[0].len = ke->ke_conf_size;
/* compute checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
desc->bd_iov_count, desc->bd_iov,
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
 * clear text layout for encryption:
 * ------------------------------------------
 * | confounder | clear pages | krb5 header |
 * ------------------------------------------
 * ---------- (cipher pages) |
 * -------------------------------------------
 * | krb5 header | cipher text | cipher text |
 * -------------------------------------------
data_desc[0].data = conf;
data_desc[0].len = ke->ke_conf_size;
cipher.data = (__u8 *) (khdr + 1);
cipher.len = blocksize + sizeof(*khdr);
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
conf, desc, &cipher, adj_nob);
rawobj_free(&cksum);
return GSS_S_FAILURE;
/* fill in checksum */
LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
memcpy((char *)(khdr + 1) + cipher.len,
cksum.data + cksum.len - ke->ke_hash_size,
rawobj_free(&cksum);
/* final token length */
token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
return GSS_S_COMPLETE;
__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
unsigned char *tmpbuf;
int blocksize, bodysize;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher_in, plain_out;
rawobj_t hash_objs[3];
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
khdr = (struct krb5_header *) token->data;
major = verify_krb5_header(kctx, khdr, 1);
if (major != GSS_S_COMPLETE) {
CERROR("bad krb5 header\n");
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
LASSERT(kctx->kc_keye.kb_tfm);
blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
/* expected token layout:
 * ----------------------------------------
 * | krb5 header | cipher text | checksum |
 * ----------------------------------------
bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
if (bodysize % blocksize) {
CERROR("odd bodysize %d\n", bodysize);
return GSS_S_DEFECTIVE_TOKEN;
if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
CERROR("incomplete token: bodysize %d\n", bodysize);
return GSS_S_DEFECTIVE_TOKEN;
if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
CERROR("buffer too small: %u, require %d\n",
msg->len, bodysize - ke->ke_conf_size);
return GSS_S_FAILURE;
OBD_ALLOC_LARGE(tmpbuf, bodysize);
return GSS_S_FAILURE;
major = GSS_S_FAILURE;
cipher_in.data = (__u8 *) (khdr + 1);
cipher_in.len = bodysize;
plain_out.data = tmpbuf;
plain_out.len = bodysize;
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
struct ll_crypto_cipher *arc4_tfm;
cksum.data = token->data + token->len - ke->ke_hash_size;
cksum.len = ke->ke_hash_size;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
CERROR("failed to obtain arc4 enc key\n");
GOTO(arc4_out, rc = -EACCES);
arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
if (ll_crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
GOTO(arc4_out_tfm, rc = -EACCES);
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1, &cipher_in, &plain_out, 0);
ll_crypto_free_blkcipher(arc4_tfm);
rawobj_free(&arc4_keye);
cksum = RAWOBJ_EMPTY;
rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1, &cipher_in, &plain_out, 0);
CERROR("error during decryption\n");
LASSERT(plain_out.len == bodysize);
/* expected clear text layout:
 * -----------------------------------------
 * | confounder | clear msgs | krb5 header |
 * -----------------------------------------
/* verify krb5 header in token is not modified */
if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
CERROR("decrypted krb5 header mismatch\n");
/* verify checksum, compose clear text as layout:
 * ------------------------------------------------------
 * | confounder | gss header | clear msgs | krb5 header |
 * ------------------------------------------------------
hash_objs[0].len = ke->ke_conf_size;
hash_objs[0].data = plain_out.data;
hash_objs[1].len = gsshdr->len;
hash_objs[1].data = gsshdr->data;
hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
hash_objs[2].data = plain_out.data + ke->ke_conf_size;
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 3, hash_objs, 0, NULL, &cksum))
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp((char *)(khdr + 1) + bodysize,
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
CERROR("checksum mismatch\n");
msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
major = GSS_S_COMPLETE;
OBD_FREE_LARGE(tmpbuf, bodysize);
rawobj_free(&cksum);
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
struct ptlrpc_bulk_desc *desc,
rawobj_t *token, int adj_nob)
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher, plain;
rawobj_t data_desc[1];
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
khdr = (struct krb5_header *) token->data;
major = verify_krb5_header(kctx, khdr, 1);
if (major != GSS_S_COMPLETE) {
CERROR("bad krb5 header\n");
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
LASSERT(kctx->kc_keye.kb_tfm);
blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
 * token format is expected as:
 * -----------------------------------------------
 * | krb5 header | head/tail cipher text | cksum |
 * -----------------------------------------------
if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
CERROR("short token size: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
cipher.data = (__u8 *) (khdr + 1);
cipher.len = blocksize + sizeof(*khdr);
plain.data = cipher.data;
plain.len = cipher.len;
rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
desc, &cipher, &plain, adj_nob);
return GSS_S_DEFECTIVE_TOKEN;
 * verify checksum, compose clear text as layout:
 * ------------------------------------------
 * | confounder | clear pages | krb5 header |
 * ------------------------------------------
data_desc[0].data = plain.data;
data_desc[0].len = blocksize;
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
desc->bd_iov_count, desc->bd_iov,
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp(plain.data + blocksize + sizeof(*khdr),
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
CERROR("checksum mismatch\n");
rawobj_free(&cksum);
return GSS_S_BAD_SIG;
rawobj_free(&cksum);
return GSS_S_COMPLETE;
int gss_display_kerberos(struct gss_ctx *ctx,
struct krb5_ctx *kctx = ctx->internal_ctx_id;
written = snprintf(buf, bufsize, "krb5 (%s)",
enctype2str(kctx->kc_enctype));

static struct gss_api_ops gss_kerberos_ops = {
.gss_import_sec_context = gss_import_sec_context_kerberos,
.gss_copy_reverse_context = gss_copy_reverse_context_kerberos,
.gss_inquire_context = gss_inquire_context_kerberos,
.gss_get_mic = gss_get_mic_kerberos,
.gss_verify_mic = gss_verify_mic_kerberos,
.gss_wrap = gss_wrap_kerberos,
.gss_unwrap = gss_unwrap_kerberos,
.gss_prep_bulk = gss_prep_bulk_kerberos,
.gss_wrap_bulk = gss_wrap_bulk_kerberos,
.gss_unwrap_bulk = gss_unwrap_bulk_kerberos,
.gss_delete_sec_context = gss_delete_sec_context_kerberos,
.gss_display = gss_display_kerberos,
static struct subflavor_desc gss_kerberos_sfs[] = {
.sf_subflavor = SPTLRPC_SUBFLVR_KRB5N,
.sf_service = SPTLRPC_SVC_NULL,
.sf_subflavor = SPTLRPC_SUBFLVR_KRB5A,
.sf_service = SPTLRPC_SVC_AUTH,
.sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
.sf_service = SPTLRPC_SVC_INTG,
.sf_subflavor = SPTLRPC_SUBFLVR_KRB5P,
.sf_service = SPTLRPC_SVC_PRIV,
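/*
 * (Added note) the gm_oid in the mechanism descriptor below,
 * "\052\206\110\206\367\022\001\002\002", is the 9-byte DER encoding of the
 * Kerberos 5 GSS-API mechanism OID 1.2.840.113554.1.2.2.
 */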
 * currently we leave module owner NULL
static struct gss_api_mech gss_kerberos_mech = {
.gm_owner = NULL, /* THIS_MODULE, */
.gm_oid = (rawobj_t)
{9, "\052\206\110\206\367\022\001\002\002"},
.gm_ops = &gss_kerberos_ops,
.gm_sfs = gss_kerberos_sfs,
int __init init_kerberos_module(void)
spin_lock_init(&krb5_seq_lock);
status = lgss_mech_register(&gss_kerberos_mech);
CERROR("Failed to register kerberos gss mechanism!\n");

void __exit cleanup_kerberos_module(void)
lgss_mech_unregister(&gss_kerberos_mech);