 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * linux/net/sunrpc/gss_krb5_mech.c
 * linux/net/sunrpc/gss_krb5_crypto.c
 * linux/net/sunrpc/gss_krb5_seal.c
 * linux/net/sunrpc/gss_krb5_seqnum.c
 * linux/net/sunrpc/gss_krb5_unseal.c
 *
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * J. Bruce Fields <bfields@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"
static spinlock_t krb5_seq_lock;
struct krb5_enctype {
        char            *ke_dispname;
        char            *ke_enc_name;           /* linux tfm name */
        char            *ke_hash_name;          /* linux tfm name */
        int              ke_enc_mode;           /* linux tfm mode */
        int              ke_hash_size;          /* checksum size */
        int              ke_conf_size;          /* confounder size */
        unsigned int     ke_hash_hmac:1;        /* is hmac? */
};
/*
 * NOTE: for aes128-cts and aes256-cts the MIT implementation uses CTS
 * encryption, but for now we simply use CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        /* fields: ke_dispname, ke_enc_name, ke_hash_name, ke_enc_mode,
         * ke_hash_size, ke_conf_size, ke_hash_hmac */
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                "des-cbc-md5", "cbc(des)", "md5", 0, 16, 8, 0 },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                "des3-hmac-sha1", "cbc(des3_ede)", "hmac(sha1)", 0, 20, 8, 1 },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96", "cbc(aes)", "hmac(sha1)",
                0, 12, 16, 1 },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96", "cbc(aes)", "hmac(sha1)",
                0, 12, 16, 1 },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                "arcfour-hmac-md5", "ecb(arc4)", "hmac(md5)", 0, 16, 8, 1 },
};

#define MAX_ENCTYPES    sizeof(enctypes)/sizeof(struct krb5_enctype)
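/*
 * Illustrative sketch (not part of the original code): if the running
 * kernel ever provides the "cts" template, the AES enctypes above could
 * request real CTS by wrapping CBC, i.e. "cts(cbc(aes))", instead of
 * falling back to CBC with padding.  Availability depends on the kernel,
 * so a caller would have to probe for it; the helper name here is
 * hypothetical.
 */
static inline struct crypto_blkcipher *krb5_try_alloc_cts_aes(void)
{
        struct crypto_blkcipher *tfm;

        tfm = crypto_alloc_blkcipher("cts(cbc(aes))", 0, 0);
        if (IS_ERR(tfm))
                return NULL;    /* kernel lacks cts: fall back to cbc(aes) */
        return tfm;
}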
static const char *enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
        kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
        if (IS_ERR(kb->kb_tfm)) {
                CERROR("failed to alloc tfm: %s, mode %d\n",
                       alg_name, alg_mode);
                return -1;
        }

        if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data,
                                    kb->kb_key.len)) {
                CERROR("failed to set %s key, len %d\n",
                       alg_name, kb->kb_key.len);
                return -1;
        }

        return 0;
}
static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* tfm arc4 is stateful; the caller must alloc/use/free it itself */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* tfm hmac is stateful; the caller must alloc/use/free it itself */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}
static
void keyblock_free(struct krb5_keyblock *kb)
{
        rawobj_free(&kb->kb_key);
        if (kb->kb_tfm)
                crypto_free_blkcipher(kb->kb_tfm);
}

static
int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
{
        return rawobj_dup(&new->kb_key, &kb->kb_key);
}
static
int get_bytes(char **ptr, const char *end, void *res, int len)
{
        char *p = *ptr;
        char *q = p + len;

        /* q < p catches pointer wrap-around on a bogus huge len */
        if (q > end || q < p)
                return -1;
        memcpy(res, p, len);
        *ptr = q;
        return 0;
}

static
int get_rawobj(char **ptr, const char *end, rawobj_t *res)
{
        char  *p = *ptr;
        char  *q;
        __u32  len;

        if (get_bytes(&p, end, &len, sizeof(len)))
                return -1;

        q = p + len;
        if (q > end || q < p)
                return -1;

        OBD_ALLOC_LARGE(res->data, len);
        if (!res->data)
                return -1;

        res->len = len;
        memcpy(res->data, p, len);
        *ptr = q;
        return 0;
}
static
int get_keyblock(char **ptr, const char *end,
                 struct krb5_keyblock *kb, __u32 keysize)
{
        char *buf;

        OBD_ALLOC_LARGE(buf, keysize);
        if (buf == NULL)
                return -1;

        if (get_bytes(ptr, end, buf, keysize)) {
                OBD_FREE_LARGE(buf, keysize);
                return -1;
        }

        kb->kb_key.len = keysize;
        kb->kb_key.data = buf;
        return 0;
}
static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        keyblock_free(&kctx->kc_keye);
        keyblock_free(&kctx->kc_keyi);
        keyblock_free(&kctx->kc_keyc);
}
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int tmp_uint, keysize;

        /* seed_init flag */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
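/*
 * Sketch of the wire layout parsed above, from the writer's side.  This
 * is illustrative only: put_bytes_sketch() is a hypothetical mirror of
 * get_bytes() and is not used anywhere in this file.  The rfc1964 blob
 * is, in order: seed_init (u32), seed, signalg (u32), sealalg (u32),
 * endtime, seq_send (u32), mech oid (length-prefixed), then enctype,
 * keysize and key data for the enc key, and the same again for the
 * seq key.
 */
static inline void put_bytes_sketch(char **ptr, const void *data, int len)
{
        memcpy(*ptr, data, len);        /* sketch only: no bounds checking */
        *ptr += len;
}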
/* Flags for version 2 context flags */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int tmp_uint, keysize;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke, ki, kc */
        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
/*
 * The whole purpose here is to keep the userspace gss context parsing
 * (from nfs-utils) as unchanged as possible; that code is not quite
 * mature yet, and many details (heimdal support, etc.) are still
 * unclear.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char            *p = (char *) inbuf->data;
        char            *end = (char *) (inbuf->data + inbuf->len);
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* only support 0, 1 for the moment */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);
                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
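/*
 * Usage sketch (hypothetical caller, for illustration only): the context
 * blob handed up from userspace is imported in one call; on success the
 * parsed context hangs off gctx->internal_ctx_id.
 */
static inline __u32 krb5_import_example(rawobj_t *blob, struct gss_ctx *gctx)
{
        __u32 major = gss_import_sec_context_kerberos(blob, gctx);

        if (major != GSS_S_COMPLETE)
                CERROR("krb5 context import failed: %x\n", major);
        return major;
}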
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long)((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, ptr, len);
}
static
__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
                   int decrypt,
                   void *iv,
                   void *in,
                   void *out,
                   int length)
{
        struct blkcipher_desc desc;
        struct scatterlist    sg;
        __u8  local_iv[16] = {0};
        __u32 ret = -EINVAL;

        LASSERT(tfm);
        desc.tfm   = tfm;
        desc.info  = local_iv;
        desc.flags = 0;

        if (length % crypto_blkcipher_blocksize(tfm) != 0) {
                CERROR("output length %d mismatch blocksize %d\n",
                       length, crypto_blkcipher_blocksize(tfm));
                goto out;
        }

        if (crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

        memcpy(out, in, length);
        buf_to_sg(&sg, out, length);

        if (decrypt)
                ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
        else
                ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
out:
        return ret;
}
static inline
int krb5_digest_hmac(struct crypto_hash *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int                i;

        crypto_hash_setkey(tfm, key->data, key->len);
        desc.tfm   = tfm;
        desc.flags = 0;

        crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_init_table(sg, 1);
                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        return crypto_hash_final(&desc, cksum->data);
}
static inline
int krb5_digest_norm(struct crypto_hash *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int                i;

        LASSERT(kb->kb_tfm);
        desc.tfm   = tfm;
        desc.flags = 0;

        crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_init_table(sg, 1);
                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        crypto_hash_final(&desc, cksum->data);

        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
}
/*
 * compute a (keyed/keyless) checksum over the plain text, with the krb5
 * wire token header appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct krb5_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct crypto_hash  *tfm;
        __u32                code = GSS_S_FAILURE;
        int                  rc;

        tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0);
        if (IS_ERR(tfm)) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        if (ke->ke_hash_hmac)
                rc = krb5_digest_hmac(tfm, &kb->kb_key,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = krb5_digest_norm(tfm, kb,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        crypto_free_hash(tfm);
        return code;
}
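/*
 * Usage sketch (illustrative, helper name hypothetical): computing the
 * keyed checksum over a single message with the checksum key kc_keyc,
 * as the MIC path below does.  The caller owns cksum->data afterwards
 * and must rawobj_free() it.
 */
static inline int krb5_checksum_example(struct krb5_ctx *kctx,
                                        struct krb5_header *khdr,
                                        rawobj_t *msg, rawobj_t *cksum)
{
        return krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                                  khdr, 1, msg, 0, NULL, cksum);
}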
static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}
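/*
 * For reference, the RFC 4121 token header manipulated above is 16 bytes
 * on the wire.  The real definition lives in gss_krb5.h; the mirror
 * below is illustrative only:
 *
 *      __u16 kh_tok_id;     -- KG_TOK_MIC_MSG or KG_TOK_WRAP_MSG
 *      __u8  kh_flags;      -- FLAG_SENDER_IS_ACCEPTOR, ...
 *      __u8  kh_filler;     -- always 0xff
 *      __u16 kh_ec;         -- extra count
 *      __u16 kh_rrc;        -- right rotation count
 *      __u64 kh_seq;        -- big-endian sequence number
 */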
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
static
int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
{
        int padding;

        padding = (blocksize - (msg->len & (blocksize - 1))) &
                  (blocksize - 1);
        if (!padding)
                return 0;

        if (msg->len + padding > msg_buflen) {
                CERROR("bufsize %u too small: datalen %u, padding %u\n",
                       msg_buflen, msg->len, padding);
                return -EINVAL;
        }

        memset(msg->data + msg->len, padding, padding);
        msg->len += padding;
        return 0;
}
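/*
 * Worked example of the padding arithmetic above: with blocksize 16 and
 * msg->len 20, (16 - (20 & 15)) & 15 = 12, so twelve bytes of value 0x0c
 * are appended and msg->len becomes 32.  A message already on a block
 * boundary gets no padding at all.
 */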
static
int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
                         int mode_ecb,
                         int inobj_cnt,
                         rawobj_t *inobjs,
                         rawobj_t *outobj,
                         int enc)
{
        struct blkcipher_desc desc;
        struct scatterlist    src, dst;
        __u8                  local_iv[16] = {0}, *buf;
        __u32                 datalen = 0;
        int                   i, rc;

        buf = outobj->data;
        desc.tfm   = tfm;
        desc.info  = local_iv;
        desc.flags = 0;

        for (i = 0; i < inobj_cnt; i++) {
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
                buf_to_sg(&dst, buf, outobj->len - datalen);

                if (mode_ecb) {
                        if (enc)
                                rc = crypto_blkcipher_encrypt(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = crypto_blkcipher_decrypt(
                                        &desc, &dst, &src, src.length);
                } else {
                        if (enc)
                                rc = crypto_blkcipher_encrypt_iv(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = crypto_blkcipher_decrypt_iv(
                                        &desc, &dst, &src, src.length);
                }

                if (rc) {
                        CERROR("encrypt error %d\n", rc);
                        return rc;
                }

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        outobj->len = datalen;
        return 0;
}
/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc ciph_desc;
        __u8                  local_iv[16] = {0};
        struct scatterlist    src, dst;
        int                   blocksize, i, rc, nob = 0;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm   = tfm;
        ciph_desc.info  = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        buf_to_sg(&src, confounder, blocksize);
        buf_to_sg(&dst, cipher->data, blocksize);

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("failed to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, desc->bd_iov[i].kiov_page,
                            (desc->bd_iov[i].kiov_len + blocksize - 1) &
                            (~(blocksize - 1)),
                            desc->bd_iov[i].kiov_offset);
                if (adj_nob)
                        nob += src.length;

                sg_init_table(&dst, 1);
                sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
                            src.offset);

                desc->bd_enc_iov[i].kiov_offset = dst.offset;
                desc->bd_enc_iov[i].kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        buf_to_sg(&src, khdr, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                         sizeof(*khdr));
        if (rc) {
                CERROR("failed to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual plain
 * text size:
 * - for a client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we adjust it according to bd_enc_iov[]->kiov_len.
 *   this means we DO NOT support a server sending an odd-size chunk of
 *   data in a page which is not the last one.
 * - for a server write: we know exactly the data size expected for each
 *   page, so kiov_len is already accurate and must not be adjusted at
 *   all.  bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len),
 *   which should have been done by prep_bulk().
 */
static
int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc ciph_desc;
        __u8                  local_iv[16] = {0};
        struct scatterlist    src, dst;
        int                   ct_nob = 0, pt_nob = 0;
        int                   blocksize, i, rc;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(desc->bd_nob_transferred);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm   = tfm;
        ciph_desc.info  = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        buf_to_sg(&src, cipher->data, blocksize);
        buf_to_sg(&dst, plain->data, blocksize);

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("failed to decrypt confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_iov[i].kiov_offset,
                               desc->bd_enc_iov[i].kiov_len, blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob_transferred)
                                desc->bd_enc_iov[i].kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        desc->bd_iov[i].kiov_len =
                                desc->bd_enc_iov[i].kiov_len;
                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob)
                                desc->bd_iov[i].kiov_len =
                                        desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(desc->bd_iov[i].kiov_len <=
                                desc->bd_enc_iov[i].kiov_len);
                }

                if (desc->bd_enc_iov[i].kiov_len == 0)
                        continue;

                sg_init_table(&src, 1);
                sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
                            desc->bd_enc_iov[i].kiov_len,
                            desc->bd_enc_iov[i].kiov_offset);

                /* decrypt in place in the bounce page, unless the plain
                 * data fills whole blocks and can go straight to the
                 * destination page */
                dst = src;
                if (desc->bd_iov[i].kiov_len % blocksize == 0)
                        sg_assign_page(&dst, desc->bd_iov[i].kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to decrypt page: %d\n", rc);
                        return rc;
                }

                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
                        memcpy(page_address(desc->bd_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               page_address(desc->bd_enc_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               desc->bd_iov[i].kiov_len);
                }

                ct_nob += desc->bd_enc_iov[i].kiov_len;
                pt_nob += desc->bd_iov[i].kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear up the rest of the unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_iov[i++].kiov_len = 0;

        /* decrypt tail (krb5 header) */
        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                         sizeof(*khdr));
        if (rc) {
                CERROR("failed to decrypt tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}
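/*
 * Worked example of the adj_nob path above (illustrative numbers): on a
 * client read, bd_iov[0].kiov_len starts at PAGE_SIZE (4096) but the
 * server only transfers 1008 cipher bytes, so bd_nob_transferred = 1008.
 * The loop trims bd_enc_iov[0].kiov_len from 4096 down to 1008 and sets
 * bd_iov[0].kiov_len to match; since 1008 is a whole number of 16-byte
 * blocks, sg_assign_page() lets the tfm decrypt straight into the plain
 * page with no bounce copy.  Any remaining iovs are then zeroed.
 */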
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be written directly in place */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                            arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          3, data_desc, &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid a compile warning */
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          3, data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
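/*
 * Token sizing sketch for the wrap path above (values illustrative):
 * with a 16-byte confounder, a 12-byte truncated HMAC and a padded
 * message of P bytes, the finished token occupies
 *
 *     sizeof(*khdr) + (conf_size + P + sizeof(*khdr)) + hash_size
 *   = 16 + (16 + P + 16) + 12 = P + 60 bytes,
 *
 * which is exactly what the final token->len assignment computes.
 */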
static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        int              blocksize, i;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_iov[i].kiov_page);
                /*
                 * offsets should always be blocksize-aligned, on both the
                 * client and the server side.  (note: the original test
                 * was "offset & blocksize", which only checks one bit;
                 * the proper power-of-two alignment mask is used here.)
                 */
                if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               desc->bd_iov[i].kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
                                                blocksize - 1) &
                                               (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}
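/*
 * Example of the round-up above: with blocksize 16, a 1000-byte kiov is
 * sized to (1000 + 15) & ~15 = 1008 bytes in the encrypted iov, so every
 * bulk page holds a whole number of cipher blocks.
 */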
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would then be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |           (cipher pages)       |
         *        v                                v
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();         /* arcfour is not supported for bulk data */
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
static
__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
                          rawobj_t *gsshdr,
                          rawobj_t *token,
                          rawobj_t *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n",
                       msg->len, bodysize - ke->ke_conf_size);
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *) (khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm,
                                            arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          1, &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("failed to decrypt\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify the krb5 header in the token was not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, composing the clear text as:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();         /* arcfour is not supported for bulk data */
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, composing the clear text as:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
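/*
 * Design note on the bulk paths above: only the confounder and the
 * trailing krb5 header travel in the token's inline cipher text; the
 * bulk pages themselves are encrypted in place via bd_enc_iov and
 * checksummed directly.  That keeps the token a small fixed size
 * regardless of how large the bulk I/O is.
 */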
static
int gss_display_kerberos(struct gss_ctx *ctx,
                         char *buf,
                         int bufsize)
{
        struct krb5_ctx *kctx = ctx->internal_ctx_id;
        int              written;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context   = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context = gss_copy_reverse_context_kerberos,
        .gss_inquire_context      = gss_inquire_context_kerberos,
        .gss_get_mic              = gss_get_mic_kerberos,
        .gss_verify_mic           = gss_verify_mic_kerberos,
        .gss_wrap                 = gss_wrap_kerberos,
        .gss_unwrap               = gss_unwrap_kerberos,
        .gss_prep_bulk            = gss_prep_bulk_kerberos,
        .gss_wrap_bulk            = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk          = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context   = gss_delete_sec_context_kerberos,
        .gss_display              = gss_display_kerberos,
};
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
/*
 * currently we leave the module owner NULL
 */
static struct gss_api_mech gss_kerberos_mech = {
        .gm_owner       = NULL, /*THIS_MODULE, */
        .gm_name        = "krb5",
        /* DER encoding of OID 1.2.840.113554.1.2.2 (Kerberos 5 mechanism) */
        .gm_oid         = (rawobj_t)
                          {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};
int __init init_kerberos_module(void)
{
        int status;

        spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}