/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * linux/net/sunrpc/gss_krb5_mech.c
 * linux/net/sunrpc/gss_krb5_crypto.c
 * linux/net/sunrpc/gss_krb5_seal.c
 * linux/net/sunrpc/gss_krb5_seqnum.c
 * linux/net/sunrpc/gss_krb5_unseal.c
 *
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * J. Bruce Fields <bfields@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"
static cfs_spinlock_t krb5_seq_lock;
struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but for now we simply do CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
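/*
 * Illustrative example (not from the original source): with a 16-byte AES
 * blocksize, a 20-byte payload is padded out to 32 bytes of cipher text
 * under CBC-with-padding, whereas true CTS would produce exactly 20 bytes;
 * both peers therefore have to agree on the CBC-with-padding convention.
 */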
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96",
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96",
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */

#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(struct krb5_enctype))
static const char *enctype2str(__u32 enctype)
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
        if (kb->kb_tfm == NULL) {
                CERROR("failed to alloc tfm: %s, mode %d\n",

        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                CERROR("failed to set %s key, len %d\n",
                       alg_name, kb->kb_key.len);
int krb5_init_keys(struct krb5_ctx *kctx)
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return GSS_S_FAILURE;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* the arc4 tfm is stateful; callers must allocate, use and free
         * it themselves */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return GSS_S_FAILURE;

        /* the hmac tfm is stateful; callers must allocate, use and free
         * it themselves */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return GSS_S_FAILURE;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return GSS_S_FAILURE;
void keyblock_free(struct krb5_keyblock *kb)
        rawobj_free(&kb->kb_key);
        ll_crypto_free_blkcipher(kb->kb_tfm);

int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
        return rawobj_dup(&new->kb_key, &kb->kb_key);
int get_bytes(char **ptr, const char *end, void *res, int len)
        if (q > end || q < p)

int get_rawobj(char **ptr, const char *end, rawobj_t *res)
        if (get_bytes(&p, end, &len, sizeof(len)))

        if (q > end || q < p)

        OBD_ALLOC_LARGE(res->data, len);

        memcpy(res->data, p, len);
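/*
 * Illustrative note (inferred from the parsing code above, not from the
 * original comments): a rawobj travels on the wire as a 32-bit length
 * followed by that many raw bytes, so the 3-byte string "abc" occupies
 * 4 + 3 = 7 bytes, e.g. 03 00 00 00 61 62 63 on a little-endian host,
 * since get_bytes() copies the length in native byte order.
 */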
int get_keyblock(char **ptr, const char *end,
                 struct krb5_keyblock *kb, __u32 keysize)
        OBD_ALLOC_LARGE(buf, keysize);

        if (get_bytes(ptr, end, buf, keysize)) {
                OBD_FREE_LARGE(buf, keysize);

        kb->kb_key.len = keysize;
        kb->kb_key.data = buf;
void delete_context_kerberos(struct krb5_ctx *kctx)
        rawobj_free(&kctx->kc_mech_used);

        keyblock_free(&kctx->kc_keye);
        keyblock_free(&kctx->kc_keyi);
        keyblock_free(&kctx->kc_keyc);
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
        unsigned int tmp_uint, keysize;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        if (get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         * we decompose them to fit into the new context
         */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||

        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return GSS_S_COMPLETE;

out_err:
        return GSS_S_FAILURE;
/* Flags for version 2 context flags */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
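/* For illustration: a flags word of 0x00000006 carries KRB5_CTX_FLAG_CFX
 * and KRB5_CTX_FLAG_ACCEPTOR_SUBKEY, but not KRB5_CTX_FLAG_INITIATOR. */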
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
        unsigned int tmp_uint, keysize;

        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
                goto out_err;

        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return GSS_S_COMPLETE;

out_err:
        return GSS_S_FAILURE;
/*
 * The whole purpose here is to keep the user-level gss context parsing
 * inherited from nfs-utils as unchanged as possible; it is not quite
 * mature yet, and many things are still unclear, e.g. heimdal support.
 */
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
        struct krb5_ctx *kctx;
        char            *p = (char *) inbuf->data;
        char            *end = (char *) (inbuf->data + inbuf->len);
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only versions 0 and 1 are supported for the moment */
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;

                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        return GSS_S_FAILURE;
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long *endtime)
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;

void gss_delete_sec_context_kerberos(void *internal_ctx)
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
        sg->page = virt_to_page(ptr);
        sg->offset = offset_in_page(ptr);
        sg->length = len;
__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
        struct blkcipher_desc desc;
        struct scatterlist    sg;
        __u8                  local_iv[16] = {0};

        desc.info = local_iv;

        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
                CERROR("output length %d doesn't match blocksize %d\n",
                       length, ll_crypto_blkcipher_blocksize(tfm));

        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n",
                       ll_crypto_blkcipher_ivsize(tfm));

        if (iv)
                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));

        memcpy(out, in, length);
        buf_to_sg(&sg, out, length);

                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
#ifdef HAVE_ASYNC_BLOCK_CIPHER

int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
        struct hash_desc   desc;
        struct scatterlist sg[1];

        ll_crypto_hash_setkey(tfm, key->data, key->len);
        ll_crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                ll_crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;
                sg[0].page = iovs[i].kiov_page;
                sg[0].offset = iovs[i].kiov_offset;
                sg[0].length = iovs[i].kiov_len;
                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
        ll_crypto_hash_update(&desc, sg, sizeof(*khdr));

        return ll_crypto_hash_final(&desc, cksum->data);
#else /* ! HAVE_ASYNC_BLOCK_CIPHER */

int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
        struct scatterlist sg[1];
        __u32              keylen = key->len, i;

        crypto_hmac_init(tfm, key->data, &keylen);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hmac_update(tfm, sg, 1);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;
                sg[0].page = iovs[i].kiov_page;
                sg[0].offset = iovs[i].kiov_offset;
                sg[0].length = iovs[i].kiov_len;
                crypto_hmac_update(tfm, sg, 1);
        }

        buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
        crypto_hmac_update(tfm, sg, 1);

        crypto_hmac_final(tfm, key->data, &keylen, cksum->data);

#endif /* HAVE_ASYNC_BLOCK_CIPHER */
int krb5_digest_norm(struct ll_crypto_hash *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
        struct hash_desc   desc;
        struct scatterlist sg[1];

        ll_crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                ll_crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;
                sg[0].page = iovs[i].kiov_page;
                sg[0].offset = iovs[i].kiov_offset;
                sg[0].length = iovs[i].kiov_len;
                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
        ll_crypto_hash_update(&desc, sg, sizeof(*khdr));

        ll_crypto_hash_final(&desc, cksum->data);

        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
/*
 * compute a (keyed/keyless) checksum over the plain text with the
 * krb5 wire token header appended.
 */
__s32 krb5_make_checksum(__u32 enctype,
                         struct krb5_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct ll_crypto_hash *tfm;
        __u32                  code = GSS_S_FAILURE;

        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = ll_crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);

        if (ke->ke_hash_hmac)
                rc = krb5_digest_hmac(tfm, &kb->kb_key,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = krb5_digest_norm(tfm, kb,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;

        ll_crypto_free_hash(tfm);
static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        cfs_spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        cfs_spin_unlock(&krb5_seq_lock);
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
        } else {
                tok_id = KG_TOK_MIC_MSG;
        }

        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }

        return GSS_S_COMPLETE;
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 0);

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;

        return GSS_S_COMPLETE;
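/*
 * Worked size example (illustrative, assuming the 16-byte krb5 wire header
 * of RFC 4121 and the 12-byte truncated hmac-sha1-96 checksum of the aes
 * enctypes): a MIC token would then be
 * sizeof(*khdr) + ke->ke_hash_size = 16 + 12 = 28 bytes.
 */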
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
        padding = (blocksize - (msg->len & (blocksize - 1))) &
                  (blocksize - 1);

        if (msg->len + padding > msg_buflen) {
                CERROR("bufsize %u too small: datalen %u, padding %u\n",
                       msg_buflen, msg->len, padding);

        memset(msg->data + msg->len, padding, padding);
        msg->len += padding;
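/*
 * Worked example (illustrative): for msg->len = 52 and blocksize = 16,
 * padding = (16 - (52 & 15)) & 15 = 12, so twelve bytes of value 0x0c
 * are appended and the message grows to 64 bytes; when msg->len is
 * already a multiple of blocksize the expression yields 0 and nothing
 * is appended.
 */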
int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
        struct blkcipher_desc desc;
        struct scatterlist    src, dst;
        __u8                  local_iv[16] = {0}, *buf;

        desc.info = local_iv;

        for (i = 0; i < inobj_cnt; i++) {
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
                buf_to_sg(&dst, buf, outobj->len - datalen);

                        rc = ll_crypto_blkcipher_encrypt(
                                &desc, &dst, &src, src.length);
                        rc = ll_crypto_blkcipher_decrypt(
                                &desc, &dst, &src, src.length);
                        rc = ll_crypto_blkcipher_encrypt_iv(
                                &desc, &dst, &src, src.length);
                        rc = ll_crypto_blkcipher_decrypt_iv(
                                &desc, &dst, &src, src.length);

                if (rc) {
                        CERROR("encrypt error %d\n", rc);

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        outobj->len = datalen;
/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
        struct blkcipher_desc ciph_desc;
        __u8                  local_iv[16] = {0};
        struct scatterlist    src, dst;
        int                   blocksize, i, rc, nob = 0;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        buf_to_sg(&src, confounder, blocksize);
        buf_to_sg(&dst, cipher->data, blocksize);

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error encrypting confounder: %d\n", rc);

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                src.page = desc->bd_iov[i].kiov_page;
                src.offset = desc->bd_iov[i].kiov_offset;
                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &

                dst.page = desc->bd_enc_iov[i].kiov_page;
                dst.offset = src.offset;
                dst.length = src.length;

                desc->bd_enc_iov[i].kiov_offset = dst.offset;
                desc->bd_enc_iov[i].kiov_len = dst.length;

                rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error encrypting page: %d\n", rc);

        /* encrypt krb5 header */
        buf_to_sg(&src, khdr, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error encrypting krb5 header: %d\n", rc);
/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text supposed to be.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size:
 * - for a client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_enc_iov[]->kiov_len. this means we DO NOT support the situation
 *   where the server sends odd-size data in a page which is not the
 *   last one.
 * - for a server write: we know exactly the data size for each page
 *   being expected, thus kiov_len is accurate already, so we should not
 *   adjust it at all. and bd_enc_iov[]->kiov_len should be
 *   round_up(bd_iov[]->kiov_len), which should have been done by
 *   prep_bulk().
 */
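/*
 * Worked example of the client-read case (illustrative, assuming
 * 4096-byte pages and a 16-byte blocksize): a 6007-byte read arrives as
 * 4096 + round_up(1911) = 4096 + 1920 = 6016 bytes of cipher text; with
 * adj_nob set, bd_iov[0].kiov_len stays 4096 while bd_iov[1].kiov_len is
 * trimmed from bd_enc_iov[1].kiov_len (1920) down to 6007 - 4096 = 1911
 * bytes of plain text.
 */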
int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
        struct blkcipher_desc ciph_desc;
        __u8                  local_iv[16] = {0};
        struct scatterlist    src, dst;
        int                   ct_nob = 0, pt_nob = 0;
        int                   blocksize, i, rc;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(desc->bd_nob_transferred);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);

        /* decrypt head (confounder) */
        buf_to_sg(&src, cipher->data, blocksize);
        buf_to_sg(&dst, plain->data, blocksize);

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error decrypting confounder: %d\n", rc);

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_iov[i].kiov_offset,
                               desc->bd_enc_iov[i].kiov_len, blocksize);
                }

                if (adj_nob) {
                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob_transferred)
                                desc->bd_enc_iov[i].kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
                        if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
                                desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(desc->bd_iov[i].kiov_len <=
                                desc->bd_enc_iov[i].kiov_len);
                }

                if (desc->bd_enc_iov[i].kiov_len == 0)
                        continue;

                src.page = desc->bd_enc_iov[i].kiov_page;
                src.offset = desc->bd_enc_iov[i].kiov_offset;
                src.length = desc->bd_enc_iov[i].kiov_len;

                if (desc->bd_iov[i].kiov_len % blocksize == 0)
                        dst.page = desc->bd_iov[i].kiov_page;

                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error decrypting page: %d\n", rc);

                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
                        memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               desc->bd_iov[i].kiov_len);
                }

                ct_nob += desc->bd_enc_iov[i].kiov_len;
                pt_nob += desc->bd_iov[i].kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);

        /* if needed, clear up the rest of the unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_iov[i++].kiov_len = 0;

        /* decrypt tail (krb5 header) */
        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error decrypting tail: %d\n", rc);

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];

        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */
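        /*
         * Worked size example (illustrative, assuming aes128-cts with a
         * 16-byte blocksize and confounder and a 12-byte truncated
         * checksum): wrapping a padded 64-byte message yields
         * 16 + 64 + 16 = 96 bytes of cipher text, so the final token is
         * 16 + 96 + 12 = 124 bytes.
         */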
        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for the checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be written directly in place */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye = RAWOBJ_EMPTY;
                struct ll_crypto_cipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (arc4_tfm == NULL) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                        CERROR("failed to set arc4 key, len %d\n",
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          3, data_desc, &cipher, 1);
arc4_out_tfm:
                ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid a compiler warning */
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          3, data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in the checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_iov[i].kiov_page);
                /*
                 * the offset should always start at a page boundary on
                 * both the client and the server side.
                 */
                if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               desc->bd_iov[i].kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
                                blocksize - 1) & (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];

        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would then be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
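        /*
         * Worked example (illustrative): for aes128-cts (16-byte blocksize,
         * 12-byte truncated checksum) the bulk token is exactly
         * 16 + 16 + 16 + 12 = 60 bytes: the krb5 header, the encrypted
         * confounder block, the encrypted copy of the krb5 header, and the
         * checksum.
         */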
        /*
         * clear text layout for the checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               desc->bd_iov_count, desc->bd_iov,
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         *                  |                  |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in the checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n",
                       msg->len, bodysize - ke->ke_conf_size -
                       (int) sizeof(*khdr));
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *) (khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye = RAWOBJ_EMPTY;
                struct ll_crypto_cipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (arc4_tfm == NULL) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (ll_crypto_blkcipher_setkey(arc4_tfm,
                                               arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
                ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          1, &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("error decrypting\n");

        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify that the krb5 header in the token was not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                CERROR("decrypted krb5 header mismatch\n");

        /* verify checksum, composing the clear text as the layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;

        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected to be:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, composing the clear text as the layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               desc->bd_iov_count, desc->bd_iov,
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
int gss_display_kerberos(struct gss_ctx *ctx,
        struct krb5_ctx *kctx = ctx->internal_ctx_id;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));

static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};

static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_service     = SPTLRPC_SVC_NULL,
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_service     = SPTLRPC_SVC_AUTH,
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_service     = SPTLRPC_SVC_INTG,
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_service     = SPTLRPC_SVC_PRIV,
};

/*
 * currently we leave the module owner NULL
 */
static struct gss_api_mech gss_kerberos_mech = {
        .gm_owner       = NULL, /*THIS_MODULE, */
        .gm_oid         = (rawobj_t)
                          {9, "\052\206\110\206\367\022\001\002\002"},
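                          /* the DER-encoded OID above is 1.2.840.113554.1.2.2,
                           * the standard Kerberos 5 GSS-API mechanism OID */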
        .gm_ops         = &gss_kerberos_ops,
        .gm_sfs         = gss_kerberos_sfs,
};

int __init init_kerberos_module(void)
        cfs_spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");

void __exit cleanup_kerberos_module(void)
        lgss_mech_unregister(&gss_kerberos_mech);