2 * Modifications for Lustre
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
6 * Copyright (c) 2011, 2015, Intel Corporation.
8 * Author: Eric Mei <ericm@clusterfs.com>
12 * linux/net/sunrpc/gss_krb5_mech.c
13 * linux/net/sunrpc/gss_krb5_crypto.c
14 * linux/net/sunrpc/gss_krb5_seal.c
15 * linux/net/sunrpc/gss_krb5_seqnum.c
16 * linux/net/sunrpc/gss_krb5_unseal.c
18 * Copyright (c) 2001 The Regents of the University of Michigan.
19 * All rights reserved.
21 * Andy Adamson <andros@umich.edu>
22 * J. Bruce Fields <bfields@umich.edu>
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. Neither the name of the University nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/slab.h>
56 #include <linux/crypto.h>
57 #include <linux/mutex.h>
60 #include <obd_class.h>
61 #include <obd_support.h>
62 #include <lustre_net.h>
63 #include <lustre_import.h>
64 #include <lustre_sec.h>
67 #include "gss_internal.h"
71 #include "gss_crypto.h"
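/*
 * krb5_seq_lock serializes kc_seq_send updates when outgoing token headers
 * are stamped in fill_krb5_header() below.
 */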
73 static DEFINE_SPINLOCK(krb5_seq_lock);
77 char *ke_enc_name; /* linux tfm name */
78 char *ke_hash_name; /* linux tfm name */
79 int ke_enc_mode; /* linux tfm mode */
80 int ke_hash_size; /* checksum size */
81 int ke_conf_size; /* confounder size */
82 unsigned int ke_hash_hmac:1; /* is hmac? */
86 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
87 * encryption, but currently we simply use CBC with padding because the Linux
88 * kernel does not yet support CTS. This needs to be fixed in the future.
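 *
 * A minimal sketch, assuming the kernel exposes a "cts(cbc(aes))" template
 * (an assumption; this file does not use it), of how an AES-CTS transform
 * could be allocated instead of plain CBC; "key"/"keylen" stand in for the
 * session key here:
 *
 *	struct crypto_blkcipher *tfm;
 *
 *	tfm = crypto_alloc_blkcipher("cts(cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	if (crypto_blkcipher_setkey(tfm, key, keylen))
 *		return -EINVAL;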
90 static struct krb5_enctype enctypes[] = {
91 [ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */
92 .ke_dispname = "des-cbc-md5",
93 .ke_enc_name = "cbc(des)",
94 .ke_hash_name = "md5",
98 [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
99 .ke_dispname = "des3-hmac-sha1",
100 .ke_enc_name = "cbc(des3_ede)",
101 .ke_hash_name = "sha1",
106 [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
107 .ke_dispname = "aes128-cts-hmac-sha1-96",
108 .ke_enc_name = "cbc(aes)",
109 .ke_hash_name = "sha1",
114 [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
115 .ke_dispname = "aes256-cts-hmac-sha1-96",
116 .ke_enc_name = "cbc(aes)",
117 .ke_hash_name = "sha1",
122 [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */
123 .ke_dispname = "arcfour-hmac-md5",
124 .ke_enc_name = "ecb(arc4)",
125 .ke_hash_name = "md5",
132 static const char * enctype2str(__u32 enctype)
134 if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
135 return enctypes[enctype].ke_dispname;
141 int krb5_init_keys(struct krb5_ctx *kctx)
143 struct krb5_enctype *ke;
145 if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
146 enctypes[kctx->kc_enctype].ke_hash_size == 0) {
147 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
151 ke = &enctypes[kctx->kc_enctype];
153 /* the arc4 tfm is stateful; callers must allocate, use and free it themselves */
154 if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
155 gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
158 /* the hmac tfm is stateful; callers must allocate, use and free it themselves */
159 if (ke->ke_hash_hmac == 0 &&
160 gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
162 if (ke->ke_hash_hmac == 0 &&
163 gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
170 void delete_context_kerberos(struct krb5_ctx *kctx)
172 rawobj_free(&kctx->kc_mech_used);
174 gss_keyblock_free(&kctx->kc_keye);
175 gss_keyblock_free(&kctx->kc_keyi);
176 gss_keyblock_free(&kctx->kc_keyc);
180 __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
182 unsigned int tmp_uint, keysize;
185 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
187 kctx->kc_seed_init = (tmp_uint != 0);
190 if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
193 /* sign/seal algorithm, not really used now */
194 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
195 gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
198 /* end time. While kc_endtime might be 64 bit, the krb5 API
199 * still uses 32 bits. To delay the 2038 bug, treat the incoming
200 * value as a u32, which gives us until 2106. See the link for details:
202 * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
204 if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
208 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
210 kctx->kc_seq_send = tmp_uint;
213 if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
216 /* old style enc/seq keys in format:
220 * we decompose them to fit into the new context
224 if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
227 if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
230 if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
234 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
235 tmp_uint != kctx->kc_enctype)
238 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
242 if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
245 /* old style fallback */
246 if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
252 CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
255 return GSS_S_FAILURE;
258 /* Flags for the version 2 context */
259 #define KRB5_CTX_FLAG_INITIATOR 0x00000001
260 #define KRB5_CTX_FLAG_CFX 0x00000002
261 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
264 __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
266 unsigned int tmp_uint, keysize;
268 /* end time. While kc_endtime might be 64 bit, the krb5 API
269 * still uses 32 bits. To delay the 2038 bug, treat the incoming
270 * value as a u32, which gives us until 2106. See the link for details:
272 * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
274 if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
278 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
281 if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
282 kctx->kc_initiate = 1;
283 if (tmp_uint & KRB5_CTX_FLAG_CFX)
285 if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
286 kctx->kc_have_acceptor_subkey = 1;
289 if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
290 sizeof(kctx->kc_seq_send)))
294 if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
297 /* size of each key */
298 if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
301 /* number of keys - should always be 3 */
302 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
306 CERROR("Invalid number of keys: %u\n", tmp_uint);
311 if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
314 if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
317 if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
320 CDEBUG(D_SEC, "successfully imported v2 context\n");
323 return GSS_S_FAILURE;
327 * The whole purpose here is to keep the user-level gss context parsing from
328 * nfs-utils as unchanged as possible; it is not quite mature yet, and many
329 * details, such as heimdal support, are still unclear.
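 *
 * The incoming buffer starts with a 32-bit version word: versions 0 and 1
 * select the rfc1964-style parser below, while newer accepted versions go
 * through the rfc4121 (CFX) parser.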
332 __u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
333 struct gss_ctx *gctx)
335 struct krb5_ctx *kctx;
336 char *p = (char *)inbuf->data;
337 char *end = (char *)(inbuf->data + inbuf->len);
338 unsigned int tmp_uint, rc;
340 if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
341 CERROR("Fail to read version\n");
342 return GSS_S_FAILURE;
345 /* only support 0, 1 for the moment */
347 CERROR("Invalid version %u\n", tmp_uint);
348 return GSS_S_FAILURE;
353 return GSS_S_FAILURE;
355 if (tmp_uint == 0 || tmp_uint == 1) {
356 kctx->kc_initiate = tmp_uint;
357 rc = import_context_rfc1964(kctx, p, end);
359 rc = import_context_rfc4121(kctx, p, end);
363 rc = krb5_init_keys(kctx);
366 delete_context_kerberos(kctx);
369 return GSS_S_FAILURE;
372 gctx->internal_ctx_id = kctx;
373 return GSS_S_COMPLETE;
377 __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
378 struct gss_ctx *gctx_new)
380 struct krb5_ctx *kctx = gctx->internal_ctx_id;
381 struct krb5_ctx *knew;
385 return GSS_S_FAILURE;
387 knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
388 knew->kc_cfx = kctx->kc_cfx;
389 knew->kc_seed_init = kctx->kc_seed_init;
390 knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
391 knew->kc_endtime = kctx->kc_endtime;
393 memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
394 knew->kc_seq_send = kctx->kc_seq_recv;
395 knew->kc_seq_recv = kctx->kc_seq_send;
396 knew->kc_enctype = kctx->kc_enctype;
398 if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
401 if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
403 if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
405 if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
407 if (krb5_init_keys(knew))
410 gctx_new->internal_ctx_id = knew;
411 CDEBUG(D_SEC, "successfully copied reverse context\n");
412 return GSS_S_COMPLETE;
415 delete_context_kerberos(knew);
417 return GSS_S_FAILURE;
421 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
424 struct krb5_ctx *kctx = gctx->internal_ctx_id;
426 *endtime = kctx->kc_endtime;
427 return GSS_S_COMPLETE;
431 void gss_delete_sec_context_kerberos(void *internal_ctx)
433 struct krb5_ctx *kctx = internal_ctx;
435 delete_context_kerberos(kctx);
440 * compute a (keyed or keyless) checksum over the plain text with the krb5
441 * wire token header appended.
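 *
 * the digest covers msgs[] and iovs[] with the token header hashed last;
 * for enctypes whose checksum is not an HMAC (ke_hash_hmac == 0) the plain
 * digest is additionally encrypted with kb->kb_tfm before it is returned.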
444 __s32 krb5_make_checksum(__u32 enctype,
445 struct gss_keyblock *kb,
446 struct krb5_header *khdr,
447 int msgcnt, rawobj_t *msgs,
448 int iovcnt, lnet_kiov_t *iovs,
450 digest_hash hash_func)
452 struct krb5_enctype *ke = &enctypes[enctype];
453 struct ahash_request *req = NULL;
454 enum cfs_crypto_hash_alg hash_algo;
458 hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);
460 /* For the cbc(des) case we want md5 instead of hmac(md5) */
461 if (strcmp(ke->ke_enc_name, "cbc(des)"))
462 req = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
465 req = cfs_crypto_hash_init(hash_algo, NULL, 0);
468 CERROR("failed to alloc hash %s : rc = %d\n",
469 ke->ke_hash_name, rc);
473 cksum->len = cfs_crypto_hash_digestsize(hash_algo);
474 OBD_ALLOC_LARGE(cksum->data, cksum->len);
481 hdr.data = (__u8 *)khdr;
482 hdr.len = sizeof(*khdr);
486 CERROR("hash function for %s undefined\n",
490 rc = hash_func(req, &hdr, msgcnt, msgs, iovcnt, iovs);
494 if (!ke->ke_hash_hmac) {
497 cfs_crypto_hash_final(req, cksum->data, &cksum->len);
498 rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
499 cksum->data, cksum->data,
506 cfs_crypto_hash_final(req, cksum->data, &cksum->len);
508 return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
511 static void fill_krb5_header(struct krb5_ctx *kctx,
512 struct krb5_header *khdr,
515 unsigned char acceptor_flag;
517 acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
520 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
521 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
522 khdr->kh_ec = cpu_to_be16(0);
523 khdr->kh_rrc = cpu_to_be16(0);
525 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
526 khdr->kh_flags = acceptor_flag;
527 khdr->kh_ec = cpu_to_be16(0xffff);
528 khdr->kh_rrc = cpu_to_be16(0xffff);
531 khdr->kh_filler = 0xff;
532 spin_lock(&krb5_seq_lock);
533 khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
534 spin_unlock(&krb5_seq_lock);
537 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
538 struct krb5_header *khdr,
541 unsigned char acceptor_flag;
542 __u16 tok_id, ec_rrc;
544 acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
547 tok_id = KG_TOK_WRAP_MSG;
550 tok_id = KG_TOK_MIC_MSG;
555 if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
556 CERROR("bad token id\n");
557 return GSS_S_DEFECTIVE_TOKEN;
559 if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
560 CERROR("bad direction flag\n");
561 return GSS_S_BAD_SIG;
563 if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
564 CERROR("missing confidential flag\n");
565 return GSS_S_BAD_SIG;
567 if (khdr->kh_filler != 0xff) {
568 CERROR("bad filler\n");
569 return GSS_S_DEFECTIVE_TOKEN;
571 if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
572 be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
573 CERROR("bad EC or RRC\n");
574 return GSS_S_DEFECTIVE_TOKEN;
576 return GSS_S_COMPLETE;
580 __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
587 struct krb5_ctx *kctx = gctx->internal_ctx_id;
588 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
589 struct krb5_header *khdr;
590 rawobj_t cksum = RAWOBJ_EMPTY;
593 /* fill krb5 header */
594 LASSERT(token->len >= sizeof(*khdr));
595 khdr = (struct krb5_header *)token->data;
596 fill_krb5_header(kctx, khdr, 0);
599 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
600 msgcnt, msgs, iovcnt, iovs, &cksum,
602 GOTO(out_free_cksum, major = GSS_S_FAILURE);
604 LASSERT(cksum.len >= ke->ke_hash_size);
605 LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
606 memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
609 token->len = sizeof(*khdr) + ke->ke_hash_size;
610 major = GSS_S_COMPLETE;
617 __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
624 struct krb5_ctx *kctx = gctx->internal_ctx_id;
625 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
626 struct krb5_header *khdr;
627 rawobj_t cksum = RAWOBJ_EMPTY;
630 if (token->len < sizeof(*khdr)) {
631 CERROR("short signature: %u\n", token->len);
632 return GSS_S_DEFECTIVE_TOKEN;
635 khdr = (struct krb5_header *)token->data;
637 major = verify_krb5_header(kctx, khdr, 0);
638 if (major != GSS_S_COMPLETE) {
639 CERROR("bad krb5 header\n");
643 if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
644 CERROR("short signature: %u, require %d\n",
645 token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
646 GOTO(out, major = GSS_S_FAILURE);
649 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
650 khdr, msgcnt, msgs, iovcnt, iovs, &cksum,
652 GOTO(out_free_cksum, major = GSS_S_FAILURE);
654 LASSERT(cksum.len >= ke->ke_hash_size);
655 if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
657 CERROR("checksum mismatch\n");
658 GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
660 major = GSS_S_COMPLETE;
668 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
671 int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
672 struct krb5_header *khdr,
674 struct ptlrpc_bulk_desc *desc,
678 struct blkcipher_desc ciph_desc;
679 __u8 local_iv[16] = {0};
680 struct scatterlist src, dst;
681 struct sg_table sg_src, sg_dst;
682 int blocksize, i, rc, nob = 0;
684 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
685 LASSERT(desc->bd_iov_count);
686 LASSERT(GET_ENC_KIOV(desc));
688 blocksize = crypto_blkcipher_blocksize(tfm);
689 LASSERT(blocksize > 1);
690 LASSERT(cipher->len == blocksize + sizeof(*khdr));
693 ciph_desc.info = local_iv;
696 /* encrypt confounder */
697 rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
701 rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
703 gss_teardown_sgtable(&sg_src);
707 rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
708 sg_src.sgl, blocksize);
710 gss_teardown_sgtable(&sg_dst);
711 gss_teardown_sgtable(&sg_src);
714 CERROR("failed to encrypt confounder: %d\n", rc);
718 /* encrypt clear pages */
719 for (i = 0; i < desc->bd_iov_count; i++) {
720 sg_init_table(&src, 1);
721 sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
722 (BD_GET_KIOV(desc, i).kiov_len +
725 BD_GET_KIOV(desc, i).kiov_offset);
728 sg_init_table(&dst, 1);
729 sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
730 src.length, src.offset);
732 BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
733 BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
735 rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
738 CERROR("failed to encrypt page: %d\n", rc);
743 /* encrypt krb5 header */
744 rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
748 rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
751 gss_teardown_sgtable(&sg_src);
755 rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
758 gss_teardown_sgtable(&sg_dst);
759 gss_teardown_sgtable(&sg_src);
762 CERROR("failed to encrypt krb5 header: %d\n", rc);
773 * desc->bd_nob_transferred is the size of the cipher text received.
774 * desc->bd_nob is the expected size of the plain text.
776 * if adj_nob != 0, we adjust each page's kiov_len to the actual plain text size:
778 * - for client read: we don't know the data size of each page, so
779 * bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
780 * might be smaller, so we need to adjust it according to
781 * bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
782 * this means we do NOT support the case where the server sends odd-sized
783 * data in a page that is not the last one.
784 * - for server write: we know exactly how much data to expect in each page,
785 * so kiov_len is already accurate and should not be adjusted at all;
786 * bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
787 * round_up(bd_iov[]->kiov_len),
788 * which should have been done by prep_bulk().
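 *
 * worked example of the client-read adjustment (values assumed for
 * illustration): with PAGE_SIZE 4096, blocksize 16, desc->bd_nob = 6001 and
 * two bulk pages, the server sends 4096 + round_up(1905, 16) = 6016 cipher
 * bytes; page 0 decrypts to 4096 plain bytes, page 1 arrives as 1920 cipher
 * bytes and its kiov_len is clipped to 6001 - 4096 = 1905 plain bytes.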
791 int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
792 struct krb5_header *khdr,
793 struct ptlrpc_bulk_desc *desc,
798 struct blkcipher_desc ciph_desc;
799 __u8 local_iv[16] = {0};
800 struct scatterlist src, dst;
801 struct sg_table sg_src, sg_dst;
802 int ct_nob = 0, pt_nob = 0;
803 int blocksize, i, rc;
805 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
806 LASSERT(desc->bd_iov_count);
807 LASSERT(GET_ENC_KIOV(desc));
808 LASSERT(desc->bd_nob_transferred);
810 blocksize = crypto_blkcipher_blocksize(tfm);
811 LASSERT(blocksize > 1);
812 LASSERT(cipher->len == blocksize + sizeof(*khdr));
815 ciph_desc.info = local_iv;
818 if (desc->bd_nob_transferred % blocksize) {
819 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
823 /* decrypt head (confounder) */
824 rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
828 rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
830 gss_teardown_sgtable(&sg_src);
834 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
835 sg_src.sgl, blocksize);
837 gss_teardown_sgtable(&sg_dst);
838 gss_teardown_sgtable(&sg_src);
841 CERROR("failed to decrypt confounder: %d\n", rc);
845 for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
847 if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
849 BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
851 CERROR("page %d: odd offset %u len %u, blocksize %d\n",
852 i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
853 BD_GET_ENC_KIOV(desc, i).kiov_len,
859 if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
860 desc->bd_nob_transferred)
861 BD_GET_ENC_KIOV(desc, i).kiov_len =
862 desc->bd_nob_transferred - ct_nob;
864 BD_GET_KIOV(desc, i).kiov_len =
865 BD_GET_ENC_KIOV(desc, i).kiov_len;
866 if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
868 BD_GET_KIOV(desc, i).kiov_len =
869 desc->bd_nob - pt_nob;
871 /* this should be guaranteed by LNET */
872 LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
874 desc->bd_nob_transferred);
875 LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
876 BD_GET_ENC_KIOV(desc, i).kiov_len);
879 if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
882 sg_init_table(&src, 1);
883 sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
884 BD_GET_ENC_KIOV(desc, i).kiov_len,
885 BD_GET_ENC_KIOV(desc, i).kiov_offset);
887 if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
889 BD_GET_KIOV(desc, i).kiov_page);
891 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
894 CERROR("failed to decrypt page: %d\n", rc);
898 if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
899 memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
900 BD_GET_KIOV(desc, i).kiov_offset,
901 page_address(BD_GET_ENC_KIOV(desc, i).
903 BD_GET_KIOV(desc, i).kiov_offset,
904 BD_GET_KIOV(desc, i).kiov_len);
907 ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
908 pt_nob += BD_GET_KIOV(desc, i).kiov_len;
911 if (unlikely(ct_nob != desc->bd_nob_transferred)) {
912 CERROR("%d cipher text transferred but only %d decrypted\n",
913 desc->bd_nob_transferred, ct_nob);
917 if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
918 CERROR("%d plain text expected but only %d received\n",
919 desc->bd_nob, pt_nob);
923 /* if needed, clear out the remaining unused iovs */
925 while (i < desc->bd_iov_count)
926 BD_GET_KIOV(desc, i++).kiov_len = 0;
928 /* decrypt tail (krb5 header) */
929 rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
934 rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
937 gss_teardown_sgtable(&sg_src);
941 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
944 gss_teardown_sgtable(&sg_src);
945 gss_teardown_sgtable(&sg_dst);
948 CERROR("failed to decrypt tail: %d\n", rc);
952 if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
953 CERROR("krb5 header doesn't match\n");
961 __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
967 struct krb5_ctx *kctx = gctx->internal_ctx_id;
968 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
969 struct krb5_header *khdr;
971 rawobj_t cksum = RAWOBJ_EMPTY;
972 rawobj_t data_desc[3], cipher;
973 __u8 conf[GSS_MAX_CIPHER_BLOCK];
974 __u8 local_iv[16] = {0};
979 LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
980 LASSERT(kctx->kc_keye.kb_tfm == NULL ||
982 crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
985 * final token format:
986 * ---------------------------------------------------
987 * | krb5 header | cipher text | checksum (16 bytes) |
988 * ---------------------------------------------------
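 *
 * e.g. assuming a 16-byte confounder, a 16-byte blocksize and a 4000-byte
 * msg (already blocksize-aligned, so no extra padding), the cipher text is
 * 16 + 4000 + 16 = 4032 bytes and token->len ends up as sizeof(*khdr) +
 * 4032 + ke->ke_hash_size.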
991 /* fill krb5 header */
992 LASSERT(token->len >= sizeof(*khdr));
993 khdr = (struct krb5_header *)token->data;
994 fill_krb5_header(kctx, khdr, 1);
996 /* generate confounder */
997 get_random_bytes(conf, ke->ke_conf_size);
999 /* get the encryption blocksize. note kc_keye might not be associated
1000 * with a tfm; currently that is only the case for arcfour-hmac */
1001 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1002 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1005 LASSERT(kctx->kc_keye.kb_tfm);
1006 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1008 LASSERT(blocksize <= ke->ke_conf_size);
1010 /* padding the message */
1011 if (gss_add_padding(msg, msg_buflen, blocksize))
1012 return GSS_S_FAILURE;
1015 * clear text layout for checksum:
1016 * ------------------------------------------------------
1017 * | confounder | gss header | clear msgs | krb5 header |
1018 * ------------------------------------------------------
1020 data_desc[0].data = conf;
1021 data_desc[0].len = ke->ke_conf_size;
1022 data_desc[1].data = gsshdr->data;
1023 data_desc[1].len = gsshdr->len;
1024 data_desc[2].data = msg->data;
1025 data_desc[2].len = msg->len;
1027 /* compute checksum */
1028 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1029 khdr, 3, data_desc, 0, NULL, &cksum,
1031 GOTO(out_free_cksum, major = GSS_S_FAILURE);
1032 LASSERT(cksum.len >= ke->ke_hash_size);
1035 * clear text layout for encryption:
1036 * -----------------------------------------
1037 * | confounder | clear msgs | krb5 header |
1038 * -----------------------------------------
1040 data_desc[0].data = conf;
1041 data_desc[0].len = ke->ke_conf_size;
1042 data_desc[1].data = msg->data;
1043 data_desc[1].len = msg->len;
1044 data_desc[2].data = (__u8 *) khdr;
1045 data_desc[2].len = sizeof(*khdr);
1047 /* cipher text is written directly in place */
1048 cipher.data = (__u8 *)(khdr + 1);
1049 cipher.len = token->len - sizeof(*khdr);
1050 LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
1052 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1053 rawobj_t arc4_keye = RAWOBJ_EMPTY;
1054 struct crypto_blkcipher *arc4_tfm;
1056 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1057 NULL, 1, &cksum, 0, NULL, &arc4_keye,
1059 CERROR("failed to obtain arc4 enc key\n");
1060 GOTO(arc4_out_key, rc = -EACCES);
1063 arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1064 if (IS_ERR(arc4_tfm)) {
1065 CERROR("failed to alloc tfm arc4 in ECB mode\n");
1066 GOTO(arc4_out_key, rc = -EACCES);
1069 if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
1071 CERROR("failed to set arc4 key, len %d\n",
1073 GOTO(arc4_out_tfm, rc = -EACCES);
1076 rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
1079 crypto_free_blkcipher(arc4_tfm);
1081 rawobj_free(&arc4_keye);
1083 rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
1084 data_desc, &cipher, 1);
1088 GOTO(out_free_cksum, major = GSS_S_FAILURE);
1090 /* fill in checksum */
1091 LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
1092 memcpy((char *)(khdr + 1) + cipher.len,
1093 cksum.data + cksum.len - ke->ke_hash_size,
1096 /* final token length */
1097 token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
1098 major = GSS_S_COMPLETE;
1100 rawobj_free(&cksum);
1105 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1106 struct ptlrpc_bulk_desc *desc)
1108 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1111 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1112 LASSERT(desc->bd_iov_count);
1113 LASSERT(GET_ENC_KIOV(desc));
1114 LASSERT(kctx->kc_keye.kb_tfm);
1116 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1118 for (i = 0; i < desc->bd_iov_count; i++) {
1119 LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
1121 * offset should always start at page boundary of either
1122 * client or server side.
1124 if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
1125 CERROR("odd offset %d in page %d\n",
1126 BD_GET_KIOV(desc, i).kiov_offset, i);
1127 return GSS_S_FAILURE;
1130 BD_GET_ENC_KIOV(desc, i).kiov_offset =
1131 BD_GET_KIOV(desc, i).kiov_offset;
1132 BD_GET_ENC_KIOV(desc, i).kiov_len =
1133 (BD_GET_KIOV(desc, i).kiov_len +
1134 blocksize - 1) & (~(blocksize - 1));
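		/* e.g. with a 16-byte blocksize, a 1905-byte kiov_len rounds up to 1920 */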
1137 return GSS_S_COMPLETE;
1141 __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
1142 struct ptlrpc_bulk_desc *desc,
1143 rawobj_t *token, int adj_nob)
1145 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1146 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1147 struct krb5_header *khdr;
1149 rawobj_t cksum = RAWOBJ_EMPTY;
1150 rawobj_t data_desc[1], cipher;
1151 __u8 conf[GSS_MAX_CIPHER_BLOCK];
1155 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1157 LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
1160 * final token format:
1161 * --------------------------------------------------
1162 * | krb5 header | head/tail cipher text | checksum |
1163 * --------------------------------------------------
1166 /* fill krb5 header */
1167 LASSERT(token->len >= sizeof(*khdr));
1168 khdr = (struct krb5_header *)token->data;
1169 fill_krb5_header(kctx, khdr, 1);
1171 /* generate confounder */
1172 get_random_bytes(conf, ke->ke_conf_size);
1174 /* get the encryption blocksize. note kc_keye might not be associated
1175 * with a tfm; currently that is only the case for arcfour-hmac */
1176 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1177 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1180 LASSERT(kctx->kc_keye.kb_tfm);
1181 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1185 * we assume the size of krb5_header (16 bytes) is a multiple of the blocksize.
1186 * the bulk token size is then exactly (sizeof(krb5_header) +
1187 * blocksize + sizeof(krb5_header) + hashsize)
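 * (e.g. 16 + 16 + 16 + 12 = 60 bytes, assuming a 16-byte blocksize and a
 * 12-byte truncated checksum)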
1189 LASSERT(blocksize <= ke->ke_conf_size);
1190 LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1191 LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
1194 * clear text layout for checksum:
1195 * ------------------------------------------
1196 * | confounder | clear pages | krb5 header |
1197 * ------------------------------------------
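 *
 * note only the confounder goes through data_desc[] here; the clear pages
 * are hashed via the kiov path (GET_KIOV(desc)) passed to
 * krb5_make_checksum() below, which also appends the krb5 header itself.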
1199 data_desc[0].data = conf;
1200 data_desc[0].len = ke->ke_conf_size;
1202 /* compute checksum */
1203 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1205 desc->bd_iov_count, GET_KIOV(desc),
1206 &cksum, gctx->hash_func))
1207 GOTO(out_free_cksum, major = GSS_S_FAILURE);
1208 LASSERT(cksum.len >= ke->ke_hash_size);
1211 * clear text layout for encryption:
1212 * ------------------------------------------
1213 * | confounder | clear pages | krb5 header |
1214 * ------------------------------------------
1216 * ---------- (cipher pages) |
1218 * -------------------------------------------
1219 * | krb5 header | cipher text | cipher text |
1220 * -------------------------------------------
1222 data_desc[0].data = conf;
1223 data_desc[0].len = ke->ke_conf_size;
1225 cipher.data = (__u8 *)(khdr + 1);
1226 cipher.len = blocksize + sizeof(*khdr);
1228 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1232 rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1233 conf, desc, &cipher, adj_nob);
1236 GOTO(out_free_cksum, major = GSS_S_FAILURE);
1238 /* fill in checksum */
1239 LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
1240 memcpy((char *)(khdr + 1) + cipher.len,
1241 cksum.data + cksum.len - ke->ke_hash_size,
1244 /* final token length */
1245 token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
1246 major = GSS_S_COMPLETE;
1248 rawobj_free(&cksum);
1253 __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
1258 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1259 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1260 struct krb5_header *khdr;
1261 unsigned char *tmpbuf;
1262 int blocksize, bodysize;
1263 rawobj_t cksum = RAWOBJ_EMPTY;
1264 rawobj_t cipher_in, plain_out;
1265 rawobj_t hash_objs[3];
1268 __u8 local_iv[16] = {0};
1272 if (token->len < sizeof(*khdr)) {
1273 CERROR("short signature: %u\n", token->len);
1274 return GSS_S_DEFECTIVE_TOKEN;
1277 khdr = (struct krb5_header *)token->data;
1279 major = verify_krb5_header(kctx, khdr, 1);
1280 if (major != GSS_S_COMPLETE) {
1281 CERROR("bad krb5 header\n");
1286 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1287 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1290 LASSERT(kctx->kc_keye.kb_tfm);
1291 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1294 /* expected token layout:
1295 * ----------------------------------------
1296 * | krb5 header | cipher text | checksum |
1297 * ----------------------------------------
1299 bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1301 if (bodysize % blocksize) {
1302 CERROR("odd bodysize %d\n", bodysize);
1303 return GSS_S_DEFECTIVE_TOKEN;
1306 if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1307 CERROR("incomplete token: bodysize %d\n", bodysize);
1308 return GSS_S_DEFECTIVE_TOKEN;
1311 if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1312 CERROR("buffer too small: %u, require %d\n",
1313 msg->len, (int)(bodysize - ke->ke_conf_size - sizeof(*khdr)));
1314 return GSS_S_FAILURE;
1318 OBD_ALLOC_LARGE(tmpbuf, bodysize);
1320 return GSS_S_FAILURE;
1322 major = GSS_S_FAILURE;
1324 cipher_in.data = (__u8 *)(khdr + 1);
1325 cipher_in.len = bodysize;
1326 plain_out.data = tmpbuf;
1327 plain_out.len = bodysize;
1329 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1331 struct crypto_blkcipher *arc4_tfm;
1333 cksum.data = token->data + token->len - ke->ke_hash_size;
1334 cksum.len = ke->ke_hash_size;
1336 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1337 NULL, 1, &cksum, 0, NULL, &arc4_keye,
1339 CERROR("failed to obtain arc4 enc key\n");
1340 GOTO(arc4_out, rc = -EACCES);
1343 arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1344 if (IS_ERR(arc4_tfm)) {
1345 CERROR("failed to alloc tfm arc4 in ECB mode\n");
1346 GOTO(arc4_out_key, rc = -EACCES);
1349 if (crypto_blkcipher_setkey(arc4_tfm,
1350 arc4_keye.data, arc4_keye.len)) {
1351 CERROR("failed to set arc4 key, len %d\n",
1353 GOTO(arc4_out_tfm, rc = -EACCES);
1356 rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
1359 crypto_free_blkcipher(arc4_tfm);
1361 rawobj_free(&arc4_keye);
1363 cksum = RAWOBJ_EMPTY;
1365 rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
1366 &cipher_in, &plain_out, 0);
1370 CERROR("failed to decrypt message\n");
1373 LASSERT(plain_out.len == bodysize);
1375 /* expected clear text layout:
1376 * -----------------------------------------
1377 * | confounder | clear msgs | krb5 header |
1378 * -----------------------------------------
1381 /* verify krb5 header in token is not modified */
1382 if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1384 CERROR("decrypted krb5 header mismatch\n");
1388 /* verify the checksum; compose the clear text with the layout:
1389 * ------------------------------------------------------
1390 * | confounder | gss header | clear msgs | krb5 header |
1391 * ------------------------------------------------------
1393 hash_objs[0].len = ke->ke_conf_size;
1394 hash_objs[0].data = plain_out.data;
1395 hash_objs[1].len = gsshdr->len;
1396 hash_objs[1].data = gsshdr->data;
1397 hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1398 hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1399 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1400 khdr, 3, hash_objs, 0, NULL, &cksum,
1404 LASSERT(cksum.len >= ke->ke_hash_size);
1405 if (memcmp((char *)(khdr + 1) + bodysize,
1406 cksum.data + cksum.len - ke->ke_hash_size,
1407 ke->ke_hash_size)) {
1408 CERROR("checksum mismatch\n");
1412 msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
1413 memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1415 major = GSS_S_COMPLETE;
1417 OBD_FREE_LARGE(tmpbuf, bodysize);
1418 rawobj_free(&cksum);
1423 __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
1424 struct ptlrpc_bulk_desc *desc,
1425 rawobj_t *token, int adj_nob)
1427 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1428 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1429 struct krb5_header *khdr;
1431 rawobj_t cksum = RAWOBJ_EMPTY;
1432 rawobj_t cipher, plain;
1433 rawobj_t data_desc[1];
1437 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1440 if (token->len < sizeof(*khdr)) {
1441 CERROR("short signature: %u\n", token->len);
1442 return GSS_S_DEFECTIVE_TOKEN;
1445 khdr = (struct krb5_header *)token->data;
1447 major = verify_krb5_header(kctx, khdr, 1);
1448 if (major != GSS_S_COMPLETE) {
1449 CERROR("bad krb5 header\n");
1454 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1455 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1459 LASSERT(kctx->kc_keye.kb_tfm);
1460 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1462 LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1465 * token format is expected as:
1466 * -----------------------------------------------
1467 * | krb5 header | head/tail cipher text | cksum |
1468 * -----------------------------------------------
1470 if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
1472 CERROR("short token size: %u\n", token->len);
1473 return GSS_S_DEFECTIVE_TOKEN;
1476 cipher.data = (__u8 *) (khdr + 1);
1477 cipher.len = blocksize + sizeof(*khdr);
1478 plain.data = cipher.data;
1479 plain.len = cipher.len;
1481 rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1482 desc, &cipher, &plain, adj_nob);
1484 return GSS_S_DEFECTIVE_TOKEN;
1487 * verify the checksum; compose the clear text with the layout:
1488 * ------------------------------------------
1489 * | confounder | clear pages | krb5 header |
1490 * ------------------------------------------
1492 data_desc[0].data = plain.data;
1493 data_desc[0].len = blocksize;
1495 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1499 &cksum, gctx->hash_func))
1500 return GSS_S_FAILURE;
1501 LASSERT(cksum.len >= ke->ke_hash_size);
1503 if (memcmp(plain.data + blocksize + sizeof(*khdr),
1504 cksum.data + cksum.len - ke->ke_hash_size,
1505 ke->ke_hash_size)) {
1506 CERROR("checksum mismatch\n");
1507 rawobj_free(&cksum);
1508 return GSS_S_BAD_SIG;
1511 rawobj_free(&cksum);
1512 return GSS_S_COMPLETE;
1515 int gss_display_kerberos(struct gss_ctx *ctx,
1519 struct krb5_ctx *kctx = ctx->internal_ctx_id;
1522 written = snprintf(buf, bufsize, "krb5 (%s)",
1523 enctype2str(kctx->kc_enctype));
1527 static struct gss_api_ops gss_kerberos_ops = {
1528 .gss_import_sec_context = gss_import_sec_context_kerberos,
1529 .gss_copy_reverse_context = gss_copy_reverse_context_kerberos,
1530 .gss_inquire_context = gss_inquire_context_kerberos,
1531 .gss_get_mic = gss_get_mic_kerberos,
1532 .gss_verify_mic = gss_verify_mic_kerberos,
1533 .gss_wrap = gss_wrap_kerberos,
1534 .gss_unwrap = gss_unwrap_kerberos,
1535 .gss_prep_bulk = gss_prep_bulk_kerberos,
1536 .gss_wrap_bulk = gss_wrap_bulk_kerberos,
1537 .gss_unwrap_bulk = gss_unwrap_bulk_kerberos,
1538 .gss_delete_sec_context = gss_delete_sec_context_kerberos,
1539 .gss_display = gss_display_kerberos,
1542 static struct subflavor_desc gss_kerberos_sfs[] = {
1544 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5N,
1546 .sf_service = SPTLRPC_SVC_NULL,
1550 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5A,
1552 .sf_service = SPTLRPC_SVC_AUTH,
1556 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
1558 .sf_service = SPTLRPC_SVC_INTG,
1562 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5P,
1564 .sf_service = SPTLRPC_SVC_PRIV,
1569 static struct gss_api_mech gss_kerberos_mech = {
1570 /* .gm_owner uses default NULL value for THIS_MODULE */
1572 .gm_oid = (rawobj_t)
1573 {9, "\052\206\110\206\367\022\001\002\002"},
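	/* = DER-encoded body of OID 1.2.840.113554.1.2.2 (Kerberos 5 mechanism) */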
1574 .gm_ops = &gss_kerberos_ops,
1576 .gm_sfs = gss_kerberos_sfs,
1579 int __init init_kerberos_module(void)
1583 status = lgss_mech_register(&gss_kerberos_mech);
1585 CERROR("Failed to register kerberos gss mechanism!\n");
1589 void cleanup_kerberos_module(void)
1591 lgss_mech_unregister(&gss_kerberos_mech);