LU-6020 gss: properly map buffers to sg
lustre/ptlrpc/gss/gss_krb5_mech.c (fs/lustre-release.git)
/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 *  linux/net/sunrpc/gss_krb5_mech.c
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *  linux/net/sunrpc/gss_krb5_seal.c
 *  linux/net/sunrpc/gss_krb5_seqnum.c
 *  linux/net/sunrpc/gss_krb5_unseal.c
 *
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
 *  J. Bruce Fields <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"

static spinlock_t krb5_seq_lock;

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};

/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but for now we simply use CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                "des-cbc-md5",
                "cbc(des)",
                "md5",
                0,
                16,
                8,
                0,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                "des3-hmac-sha1",
                "cbc(des3_ede)",
                "hmac(sha1)",
                0,
                20,
                8,
                1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                "arcfour-hmac-md5",
                "ecb(arc4)",
                "hmac(md5)",
                0,
                16,
                8,
                1,
        },
};

#define MAX_ENCTYPES    ARRAY_SIZE(enctypes)

static const char * enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}

static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
        kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
        if (IS_ERR(kb->kb_tfm)) {
                CERROR("failed to alloc tfm: %s, mode %d\n",
                       alg_name, alg_mode);
                /* don't let keyblock_free() pass an ERR_PTR to
                 * crypto_free_blkcipher() */
                kb->kb_tfm = NULL;
                return -1;
        }

        if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                CERROR("failed to set %s key, len %d\n",
                       alg_name, kb->kb_key.len);
                return -1;
        }

        return 0;
}

static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* tfm arc4 is stateful, the user should alloc/use/free it on their own */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* tfm hmac is stateful, the user should alloc/use/free it on their own */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}

static
void keyblock_free(struct krb5_keyblock *kb)
{
        rawobj_free(&kb->kb_key);
        if (kb->kb_tfm)
                crypto_free_blkcipher(kb->kb_tfm);
}

static
int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
{
        return rawobj_dup(&new->kb_key, &kb->kb_key);
}

static
int get_bytes(char **ptr, const char *end, void *res, int len)
{
        char *p, *q;

        p = *ptr;
        q = p + len;
        if (q > end || q < p)
                return -1;
        memcpy(res, p, len);
        *ptr = q;
        return 0;
}

static
int get_rawobj(char **ptr, const char *end, rawobj_t *res)
{
        char   *p, *q;
        __u32   len;

        p = *ptr;
        if (get_bytes(&p, end, &len, sizeof(len)))
                return -1;

        q = p + len;
        if (q > end || q < p)
                return -1;

        OBD_ALLOC_LARGE(res->data, len);
        if (!res->data)
                return -1;

        res->len = len;
        memcpy(res->data, p, len);
        *ptr = q;
        return 0;
}
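
/*
 * Illustrative note (not from the original source): a rawobj is encoded
 * in the context buffer as a 32-bit host-endian length followed by that
 * many bytes of data, e.g. for the 4-byte payload "krb5":
 *
 *	| 04 00 00 00 | 'k' 'r' 'b' '5' |   (little-endian host)
 *
 * get_rawobj() reads the length with get_bytes(), bounds-checks it
 * against @end, then copies the payload into a freshly allocated buffer.
 */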

static
int get_keyblock(char **ptr, const char *end,
                 struct krb5_keyblock *kb, __u32 keysize)
{
        char *buf;

        OBD_ALLOC_LARGE(buf, keysize);
        if (buf == NULL)
                return -1;

        if (get_bytes(ptr, end, buf, keysize)) {
                OBD_FREE_LARGE(buf, keysize);
                return -1;
        }

        kb->kb_key.len = keysize;
        kb->kb_key.data = buf;
        return 0;
}

static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        keyblock_free(&kctx->kc_keye);
        keyblock_free(&kctx->kc_keyi);
        keyblock_free(&kctx->kc_keyc);
}

static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}

/* Flags for version 2 contexts */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }
        /* ke */
        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}

/*
 * The whole purpose here is to keep the user level gss context parsing
 * from nfs-utils as unchanged as we can; it is not quite mature yet, and
 * many things are still not clear, like heimdal support etc.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char            *p = (char *) inbuf->data;
        char            *end = (char *) (inbuf->data + inbuf->len);
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only support versions 0, 1 (rfc1964) and 2 (rfc4121) for now */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}

static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}

/*
 * Should be used for buffers allocated with k/vmalloc().
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 *
 * This function is copied from the ceph filesystem code.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
                         const void *buf, unsigned int buf_len)
{
        struct scatterlist *sg;
        const bool is_vmalloc = is_vmalloc_addr(buf);
        unsigned int off = offset_in_page(buf);
        unsigned int chunk_cnt = 1;
        unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
        int i;
        int ret;

        if (buf_len == 0) {
                memset(sgt, 0, sizeof(*sgt));
                return -EINVAL;
        }

        if (is_vmalloc) {
                chunk_cnt = chunk_len >> PAGE_SHIFT;
                chunk_len = PAGE_SIZE;
        }

        if (chunk_cnt > 1) {
                ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
                if (ret)
                        return ret;
        } else {
                WARN_ON(chunk_cnt != 1);
                sg_init_table(prealloc_sg, 1);
                sgt->sgl = prealloc_sg;
                sgt->nents = sgt->orig_nents = 1;
        }

        for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
                struct page *page;
                unsigned int len = min(chunk_len - off, buf_len);

                if (is_vmalloc)
                        page = vmalloc_to_page(buf);
                else
                        page = virt_to_page(buf);

                sg_set_page(sg, page, len, off);

                off = 0;
                buf += len;
                buf_len -= len;
        }

        WARN_ON(buf_len != 0);

        return 0;
}

static void teardown_sgtable(struct sg_table *sgt)
{
        if (sgt->orig_nents > 1)
                sg_free_table(sgt);
}
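
/*
 * Example usage (illustrative, not part of the original file): hash a
 * kvmalloc'ed buffer through a temporary sg_table.  "desc" stands for
 * an already initialized struct hash_desc.
 *
 *	struct sg_table sgt;
 *	struct scatterlist prealloc_sg;
 *
 *	if (setup_sgtable(&sgt, &prealloc_sg, buf, buf_len) == 0) {
 *		crypto_hash_update(&desc, sgt.sgl, buf_len);
 *		teardown_sgtable(&sgt);
 *	}
 */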

static
__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
                   int decrypt,
                   void * iv,
                   void * in,
                   void * out,
                   int length)
{
        struct sg_table sg_out;
        struct blkcipher_desc desc;
        struct scatterlist    sg;
        __u8 local_iv[16] = {0};
        __u32 ret = -EINVAL;

        LASSERT(tfm);
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        if (length % crypto_blkcipher_blocksize(tfm) != 0) {
                CERROR("output length %d is not a multiple of blocksize %d\n",
                       length, crypto_blkcipher_blocksize(tfm));
                goto out;
        }

        if (crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

        memcpy(out, in, length);

        ret = setup_sgtable(&sg_out, &sg, out, length);
        if (ret != 0)
                goto out;

        if (decrypt)
                ret = crypto_blkcipher_decrypt_iv(&desc, sg_out.sgl,
                                                  sg_out.sgl, length);
        else
                ret = crypto_blkcipher_encrypt_iv(&desc, sg_out.sgl,
                                                  sg_out.sgl, length);

        teardown_sgtable(&sg_out);
out:
        return ret;
}
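
/*
 * Illustrative call (assumptions: "kb" is an initialized krb5_keyblock
 * and "buf" holds block-aligned plaintext).  Like the in-file caller in
 * krb5_digest_norm(), encryption can be done in place by passing the
 * same buffer for in and out:
 *
 *	__u32 rc = krb5_encrypt(kb->kb_tfm, 0, NULL, buf, buf, buf_len);
 *
 * Passing decrypt=1 runs the same path through
 * crypto_blkcipher_decrypt_iv() instead.
 */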

static inline
int krb5_digest_hmac(struct crypto_hash *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc        desc;
        struct sg_table         sgt;
        struct scatterlist      sg[1];
        int                     i, rc;

        crypto_hash_setkey(tfm, key->data, key->len);
        desc.tfm  = tfm;
        desc.flags = 0;

        crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;

                rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
                if (rc != 0)
                        return rc;

                crypto_hash_update(&desc, sgt.sgl, msgs[i].len);

                teardown_sgtable(&sgt);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_init_table(sg, 1);
                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
                if (rc != 0)
                        return rc;

                crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));

                teardown_sgtable(&sgt);
        }

        return crypto_hash_final(&desc, cksum->data);
}

static inline
int krb5_digest_norm(struct crypto_hash *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc        desc;
        struct scatterlist      sg[1];
        struct sg_table         sgt;
        int                     i, rc;

        LASSERT(kb->kb_tfm);
        desc.tfm  = tfm;
        desc.flags = 0;

        crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;

                rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
                if (rc != 0)
                        return rc;

                crypto_hash_update(&desc, sgt.sgl, msgs[i].len);

                teardown_sgtable(&sgt);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_init_table(sg, 1);
                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
                if (rc != 0)
                        return rc;

                crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));

                teardown_sgtable(&sgt);
        }

        crypto_hash_final(&desc, cksum->data);

        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
}

/*
 * compute a (keyed/keyless) checksum over the plain text with the
 * krb5 wire token header appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct krb5_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct crypto_hash    *tfm;
        __u32                  code = GSS_S_FAILURE;
        int                    rc;

        /* crypto_alloc_hash() returns an ERR_PTR() on failure, not NULL */
        tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0);
        if (IS_ERR(tfm)) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        if (ke->ke_hash_hmac)
                rc = krb5_digest_hmac(tfm, &kb->kb_key,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = krb5_digest_norm(tfm, kb,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        crypto_free_hash(tfm);
        return code;
}

static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}
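
/*
 * For reference (rfc4121, matching the kh_* fields used above), the
 * 16-byte token header laid down by fill_krb5_header() is:
 *
 *	| tok_id (2) | flags (1) | filler (1) | ec (2) | rrc (2) | seq (8) |
 *
 * For MIC tokens ec and rrc are both 0xffff; for wrap tokens both are 0
 * here, since no extra count or right rotation is used.
 */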

static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}

static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, need %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

static
int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
{
        int padding;

        padding = (blocksize - (msg->len & (blocksize - 1))) &
                  (blocksize - 1);
        if (!padding)
                return 0;

        if (msg->len + padding > msg_buflen) {
                CERROR("bufsize %u too small: datalen %u, padding %u\n",
                        msg_buflen, msg->len, padding);
                return -EINVAL;
        }

        memset(msg->data + msg->len, padding, padding);
        msg->len += padding;
        return 0;
}
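
/*
 * Worked example (illustrative): with blocksize 16 and msg->len 21,
 * padding = (16 - (21 & 15)) & 15 = 11, so eleven bytes of value 0x0b
 * are appended and msg->len becomes 32.  The pad byte value equals the
 * pad length, as in PKCS#5/#7-style padding; an already block-aligned
 * message gets no padding at all.
 */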

static
int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
                         int mode_ecb,
                         int inobj_cnt,
                         rawobj_t *inobjs,
                         rawobj_t *outobj,
                         int enc)
{
        struct blkcipher_desc desc;
        struct scatterlist    src, dst;
        struct sg_table       sg_src, sg_dst;
        __u8                  local_iv[16] = {0}, *buf;
        __u32                 datalen = 0;
        int                   i, rc;
        ENTRY;

        buf = outobj->data;
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        for (i = 0; i < inobj_cnt; i++) {
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                rc = setup_sgtable(&sg_src, &src, inobjs[i].data,
                                   inobjs[i].len);
                if (rc != 0)
                        RETURN(rc);

                rc = setup_sgtable(&sg_dst, &dst, buf,
                                   outobj->len - datalen);
                if (rc != 0) {
                        teardown_sgtable(&sg_src);
                        RETURN(rc);
                }

                if (mode_ecb) {
                        if (enc)
                                rc = crypto_blkcipher_encrypt(&desc, sg_dst.sgl,
                                                              sg_src.sgl,
                                                              inobjs[i].len);
                        else
                                rc = crypto_blkcipher_decrypt(&desc, sg_dst.sgl,
                                                              sg_src.sgl,
                                                              inobjs[i].len);
                } else {
                        if (enc)
                                rc = crypto_blkcipher_encrypt_iv(&desc,
                                                                 sg_dst.sgl,
                                                                 sg_src.sgl,
                                                                 inobjs[i].len);
                        else
                                rc = crypto_blkcipher_decrypt_iv(&desc,
                                                                 sg_dst.sgl,
                                                                 sg_src.sgl,
                                                                 inobjs[i].len);
                }

                teardown_sgtable(&sg_src);
                teardown_sgtable(&sg_dst);

                if (rc) {
                        CERROR("encrypt error %d\n", rc);
                        RETURN(rc);
                }

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        outobj->len = datalen;
        RETURN(0);
}

/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        teardown_sgtable(&sg_dst);
        teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        teardown_sgtable(&sg_dst);
        teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}

/*
 * desc->bd_nob_transferred is the size of cipher text received.
 * desc->bd_nob is the target size of plain text supposed to be.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
 *   this means we DO NOT support the situation where the server sends an
 *   odd size of data in a page which is not the last one.
 * - for server write: we know exactly the data size expected for each
 *   page, thus kiov_len is accurate already, so we should not adjust it
 *   at all. and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
 *   round_up(bd_iov[]->kiov_len), which should have been done by
 *   prep_bulk().
 */
static
int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     ct_nob = 0, pt_nob = 0;
        int                     blocksize, i, rc;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(desc->bd_nob_transferred);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        rc = setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;

        rc = setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
        if (rc != 0) {
                teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        teardown_sgtable(&sg_dst);
        teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to decrypt confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
                    != 0 ||
                    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
                    != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
                               BD_GET_ENC_KIOV(desc, i).kiov_len,
                               blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob_transferred)
                                BD_GET_ENC_KIOV(desc, i).kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        BD_GET_KIOV(desc, i).kiov_len =
                                BD_GET_ENC_KIOV(desc, i).kiov_len;
                        if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob)
                                BD_GET_KIOV(desc, i).kiov_len =
                                        desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
                                BD_GET_ENC_KIOV(desc, i).kiov_len);
                }

                if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
                        continue;

                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            BD_GET_ENC_KIOV(desc, i).kiov_len,
                            BD_GET_ENC_KIOV(desc, i).kiov_offset);
                dst = src;
                if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
                        sg_assign_page(&dst,
                                       BD_GET_KIOV(desc, i).kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to decrypt page: %d\n", rc);
                        return rc;
                }

                if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
                        memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               page_address(BD_GET_ENC_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               BD_GET_KIOV(desc, i).kiov_len);
                }

                ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
                pt_nob += BD_GET_KIOV(desc, i).kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear up the remaining unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        /* decrypt tail (krb5 header) */
        rc = setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        teardown_sgtable(&sg_src);
        teardown_sgtable(&sg_dst);

        if (rc) {
                CERROR("failed to decrypt tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}

static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be written directly in place */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                            arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          3, data_desc, &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid compile warning */
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          3, data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}

static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        int                  blocksize, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.  blocksize is a power of two,
                 * so test alignment with (blocksize - 1).
                 */
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               BD_GET_KIOV(desc, i).kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        (BD_GET_KIOV(desc, i).kiov_len +
                         blocksize - 1) & (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}
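
/*
 * Illustrative example: with blocksize 16 and a 1000-byte kiov, the
 * matching bd_enc_vec entry is rounded up to (1000 + 15) & ~15 = 1008
 * bytes, so the cipher text of every page is block-aligned before
 * krb5_encrypt_bulk() runs over it.
 */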
1520
1521 static
1522 __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
1523                              struct ptlrpc_bulk_desc *desc,
1524                              rawobj_t *token, int adj_nob)
1525 {
1526         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1527         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1528         struct krb5_header  *khdr;
1529         int                  blocksize;
1530         rawobj_t             cksum = RAWOBJ_EMPTY;
1531         rawobj_t             data_desc[1], cipher;
1532         __u8                 conf[GSS_MAX_CIPHER_BLOCK];
1533         int                  rc = 0;
1534
1535         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1536         LASSERT(ke);
1537         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
1538
1539         /*
1540          * final token format:
1541          * --------------------------------------------------
1542          * | krb5 header | head/tail cipher text | checksum |
1543          * --------------------------------------------------
1544          */
1545
1546         /* fill krb5 header */
1547         LASSERT(token->len >= sizeof(*khdr));
1548         khdr = (struct krb5_header *) token->data;
1549         fill_krb5_header(kctx, khdr, 1);
1550
1551         /* generate confounder */
1552         cfs_get_random_bytes(conf, ke->ke_conf_size);
1553
1554         /* get encryption blocksize. note kc_keye might not be associated
1555          * with a tfm; currently that is the case only for arcfour-hmac */
1556         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1557                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1558                 blocksize = 1;
1559         } else {
1560                 LASSERT(kctx->kc_keye.kb_tfm);
1561                 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1562         }
1563
1564         /*
1565          * we assume the size of krb5_header (16 bytes) is a multiple of
1566          * blocksize; the bulk token size is then exactly sizeof(krb5_header)
1567          * + blocksize + sizeof(krb5_header) + hashsize.
1568          */
1569         LASSERT(blocksize <= ke->ke_conf_size);
1570         LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1571         LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
1572
1573         /*
1574          * clear text layout for checksum:
1575          * ------------------------------------------
1576          * | confounder | clear pages | krb5 header |
1577          * ------------------------------------------
1578          */
1579         data_desc[0].data = conf;
1580         data_desc[0].len = ke->ke_conf_size;
1581
1582         /* compute checksum */
1583         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1584                                khdr, 1, data_desc,
1585                                desc->bd_iov_count, GET_KIOV(desc),
1586                                &cksum))
1587                 return GSS_S_FAILURE;
1588         LASSERT(cksum.len >= ke->ke_hash_size);
1589
1590         /*
1591          * clear text layout for encryption:
1592          * ------------------------------------------
1593          * | confounder | clear pages | krb5 header |
1594          * ------------------------------------------
1595          *        |              |              |
1596          *        |      (to cipher pages)      |
1597          * result token:                        |
1598          * ---------------------------------------------
1599          * | krb5 header | enc(confounder) | enc(khdr) |
1600          * ---------------------------------------------
1601          */
1602         data_desc[0].data = conf;
1603         data_desc[0].len = ke->ke_conf_size;
1604
1605         cipher.data = (__u8 *) (khdr + 1);
1606         cipher.len = blocksize + sizeof(*khdr);
1607
1608         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1609                 LBUG(); /* bulk wrap is not implemented for arcfour-hmac */
1610                 rc = 0;
1611         } else {
1612                 rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1613                                        conf, desc, &cipher, adj_nob);
1614         }
1615
1616         if (rc != 0) {
1617                 rawobj_free(&cksum);
1618                 return GSS_S_FAILURE;
1619         }
1620
1621         /* fill in checksum */
1622         LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
1623         memcpy((char *)(khdr + 1) + cipher.len,
1624                cksum.data + cksum.len - ke->ke_hash_size,
1625                ke->ke_hash_size);
1626         rawobj_free(&cksum);
1627
1628         /* final token length */
1629         token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
1630         return GSS_S_COMPLETE;
1631 }
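
/*
 * Worked example of the bulk token size (figures illustrative; the real
 * values come from the enctypes table): with cipher.len fixed above at
 * blocksize + sizeof(*khdr), the token is
 *
 *   sizeof(*khdr) + (blocksize + sizeof(*khdr)) + ke->ke_hash_size
 *
 * e.g. 16 + (16 + 16) + 12 = 60 bytes for a 16-byte cipher block and a
 * 12-byte truncated hmac. It is independent of the payload size, since
 * the bulk pages are carried in the descriptor, not in the token.
 */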
1632
1633 static
1634 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1635                           rawobj_t        *gsshdr,
1636                           rawobj_t        *token,
1637                           rawobj_t        *msg)
1638 {
1639         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1640         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1641         struct krb5_header  *khdr;
1642         unsigned char       *tmpbuf;
1643         int                  blocksize, bodysize;
1644         rawobj_t             cksum = RAWOBJ_EMPTY;
1645         rawobj_t             cipher_in, plain_out;
1646         rawobj_t             hash_objs[3];
1647         int                  rc = 0;
1648         __u32                major;
1649
1650         LASSERT(ke);
1651
1652         if (token->len < sizeof(*khdr)) {
1653                 CERROR("short signature: %u\n", token->len);
1654                 return GSS_S_DEFECTIVE_TOKEN;
1655         }
1656
1657         khdr = (struct krb5_header *) token->data;
1658
1659         major = verify_krb5_header(kctx, khdr, 1);
1660         if (major != GSS_S_COMPLETE) {
1661                 CERROR("bad krb5 header\n");
1662                 return major;
1663         }
1664
1665         /* block size */
1666         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1667                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1668                 blocksize = 1;
1669         } else {
1670                 LASSERT(kctx->kc_keye.kb_tfm);
1671                 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1672         }
1673
1674         /* expected token layout:
1675          * ----------------------------------------
1676          * | krb5 header | cipher text | checksum |
1677          * ----------------------------------------
1678          */
1679         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1680
1681         if (bodysize % blocksize) {
1682                 CERROR("odd bodysize %d\n", bodysize);
1683                 return GSS_S_DEFECTIVE_TOKEN;
1684         }
1685
1686         if (bodysize <= (int)(ke->ke_conf_size + sizeof(*khdr))) {
1687                 CERROR("incomplete token: bodysize %d\n", bodysize);
1688                 return GSS_S_DEFECTIVE_TOKEN;
1689         }
1690
1691         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1692                 CERROR("buffer too small: %u, require %d\n", msg->len,
1693                        (int)(bodysize - ke->ke_conf_size - sizeof(*khdr)));
1694                 return GSS_S_FAILURE;
1695         }
1696
1697         /* decrypting */
1698         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1699         if (!tmpbuf)
1700                 return GSS_S_FAILURE;
1701
1702         major = GSS_S_FAILURE;
1703
1704         cipher_in.data = (__u8 *) (khdr + 1);
1705         cipher_in.len = bodysize;
1706         plain_out.data = tmpbuf;
1707         plain_out.len = bodysize;
1708
1709         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1710                 rawobj_t                 arc4_keye;
1711                 struct crypto_blkcipher *arc4_tfm;
1712
1713                 cksum.data = token->data + token->len - ke->ke_hash_size;
1714                 cksum.len = ke->ke_hash_size;
1715
1716                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1717                                        NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1718                         CERROR("failed to obtain arc4 enc key\n");
1719                         GOTO(arc4_out, rc = -EACCES);
1720                 }
1721
1722                 arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1723                 if (IS_ERR(arc4_tfm)) {
1724                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1725                         GOTO(arc4_out_key, rc = -EACCES);
1726                 }
1727
1728                 if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
1729                                             arc4_keye.len)) {
1730                         CERROR("failed to set arc4 key, len %d\n",
1731                                arc4_keye.len);
1732                         GOTO(arc4_out_tfm, rc = -EACCES);
1733                 }
1734
1735                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1736                                           1, &cipher_in, &plain_out, 0);
1737 arc4_out_tfm:
1738                 crypto_free_blkcipher(arc4_tfm);
1739 arc4_out_key:
1740                 rawobj_free(&arc4_keye);
1741 arc4_out:
1742                 cksum = RAWOBJ_EMPTY; /* points into token, must not be freed */
1743         } else {
1744                 rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1745                                           1, &cipher_in, &plain_out, 0);
1746         }
1747
1748         if (rc != 0) {
1749                 CERROR("error decrypting: rc = %d\n", rc);
1750                 goto out_free;
1751         }
1752         LASSERT(plain_out.len == bodysize);
1753
1754         /* expected clear text layout:
1755          * -----------------------------------------
1756          * | confounder | clear msgs | krb5 header |
1757          * -----------------------------------------
1758          */
1759
1760         /* verify krb5 header in token is not modified */
1761         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1762                    sizeof(*khdr))) {
1763                 CERROR("decrypted krb5 header mismatch\n");
1764                 goto out_free;
1765         }
1766
1767         /* verify the checksum; the clear text for hashing is composed as:
1768          * ------------------------------------------------------
1769          * | confounder | gss header | clear msgs | krb5 header |
1770          * ------------------------------------------------------
1771          */
1772         hash_objs[0].len = ke->ke_conf_size;
1773         hash_objs[0].data = plain_out.data;
1774         hash_objs[1].len = gsshdr->len;
1775         hash_objs[1].data = gsshdr->data;
1776         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1777         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1778         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1779                                khdr, 3, hash_objs, 0, NULL, &cksum))
1780                 goto out_free;
1781
1782         LASSERT(cksum.len >= ke->ke_hash_size);
1783         if (memcmp((char *)(khdr + 1) + bodysize,
1784                    cksum.data + cksum.len - ke->ke_hash_size,
1785                    ke->ke_hash_size)) {
1786                 CERROR("checksum mismatch\n");
1787                 goto out_free;
1788         }
1789
1790         msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
1791         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1792
1793         major = GSS_S_COMPLETE;
1794 out_free:
1795         OBD_FREE_LARGE(tmpbuf, bodysize);
1796         rawobj_free(&cksum);
1797         return major;
1798 }
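
/*
 * Length bookkeeping for the unwrap path above, as equations:
 *
 *   bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size
 *   msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr)
 *
 * i.e. the decrypted body minus the leading confounder and the trailing
 * encrypted copy of the krb5 header.
 */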
1799
1800 static
1801 __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
1802                                struct ptlrpc_bulk_desc *desc,
1803                                rawobj_t *token, int adj_nob)
1804 {
1805         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1806         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1807         struct krb5_header  *khdr;
1808         int                  blocksize;
1809         rawobj_t             cksum = RAWOBJ_EMPTY;
1810         rawobj_t             cipher, plain;
1811         rawobj_t             data_desc[1];
1812         int                  rc;
1813         __u32                major;
1814
1815         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1816         LASSERT(ke);
1817
1818         if (token->len < sizeof(*khdr)) {
1819                 CERROR("short signature: %u\n", token->len);
1820                 return GSS_S_DEFECTIVE_TOKEN;
1821         }
1822
1823         khdr = (struct krb5_header *) token->data;
1824
1825         major = verify_krb5_header(kctx, khdr, 1);
1826         if (major != GSS_S_COMPLETE) {
1827                 CERROR("bad krb5 header\n");
1828                 return major;
1829         }
1830
1831         /* block size */
1832         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1833                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1834                 blocksize = 1;
1835                 LBUG(); /* bulk unwrap is not implemented for arcfour-hmac */
1836         } else {
1837                 LASSERT(kctx->kc_keye.kb_tfm);
1838                 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1839         }
1840         LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1841
1842         /*
1843          * token format is expected as:
1844          * -----------------------------------------------
1845          * | krb5 header | head/tail cipher text | cksum |
1846          * -----------------------------------------------
1847          */
1848         if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
1849                          ke->ke_hash_size) {
1850                 CERROR("short token size: %u\n", token->len);
1851                 return GSS_S_DEFECTIVE_TOKEN;
1852         }
1853
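        /*
         * only the head/tail segment (encrypted confounder plus trailing
         * krb5 header copy) lives in the token itself; alias plain onto
         * the cipher bytes so it is decrypted in place. the bulk pages
         * are decrypted via the descriptor inside krb5_decrypt_bulk().
         */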
1854         cipher.data = (__u8 *) (khdr + 1);
1855         cipher.len = blocksize + sizeof(*khdr);
1856         plain.data = cipher.data;
1857         plain.len = cipher.len;
1858
1859         rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1860                                desc, &cipher, &plain, adj_nob);
1861         if (rc)
1862                 return GSS_S_DEFECTIVE_TOKEN;
1863
1864         /*
1865          * verify the checksum; the clear text for hashing is composed as:
1866          * ------------------------------------------
1867          * | confounder | clear pages | krb5 header |
1868          * ------------------------------------------
1869          */
1870         data_desc[0].data = plain.data;
1871         data_desc[0].len = blocksize;
1872
1873         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1874                                khdr, 1, data_desc,
1875                                desc->bd_iov_count,
1876                                GET_KIOV(desc),
1877                                &cksum))
1878                 return GSS_S_FAILURE;
1879         LASSERT(cksum.len >= ke->ke_hash_size);
1880
1881         if (memcmp(plain.data + blocksize + sizeof(*khdr),
1882                    cksum.data + cksum.len - ke->ke_hash_size,
1883                    ke->ke_hash_size)) {
1884                 CERROR("checksum mismatch\n");
1885                 rawobj_free(&cksum);
1886                 return GSS_S_BAD_SIG;
1887         }
1888
1889         rawobj_free(&cksum);
1890         return GSS_S_COMPLETE;
1891 }
1892
1893 int gss_display_kerberos(struct gss_ctx        *ctx,
1894                          char                  *buf,
1895                          int                    bufsize)
1896 {
1897         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1898         int                 written;
1899
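        /*
         * note: snprintf() returns the length that would have been
         * written, which may exceed bufsize if the output was truncated;
         * callers must not assume written <= bufsize.
         */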
1900         written = snprintf(buf, bufsize, "krb5 (%s)",
1901                            enctype2str(kctx->kc_enctype));
1902         return written;
1903 }
1904
1905 static struct gss_api_ops gss_kerberos_ops = {
1906         .gss_import_sec_context     = gss_import_sec_context_kerberos,
1907         .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
1908         .gss_inquire_context        = gss_inquire_context_kerberos,
1909         .gss_get_mic                = gss_get_mic_kerberos,
1910         .gss_verify_mic             = gss_verify_mic_kerberos,
1911         .gss_wrap                   = gss_wrap_kerberos,
1912         .gss_unwrap                 = gss_unwrap_kerberos,
1913         .gss_prep_bulk              = gss_prep_bulk_kerberos,
1914         .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
1915         .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
1916         .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
1917         .gss_display                = gss_display_kerberos,
1918 };
1919
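/*
 * Subflavor map, as commonly documented for sptlrpc (summary ours):
 * krb5n authenticates only, krb5a additionally protects the integrity
 * of the RPC header, krb5i protects the integrity of whole messages,
 * and krb5p also encrypts them.
 */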
1920 static struct subflavor_desc gss_kerberos_sfs[] = {
1921         {
1922                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
1923                 .sf_qop         = 0,
1924                 .sf_service     = SPTLRPC_SVC_NULL,
1925                 .sf_name        = "krb5n"
1926         },
1927         {
1928                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
1929                 .sf_qop         = 0,
1930                 .sf_service     = SPTLRPC_SVC_AUTH,
1931                 .sf_name        = "krb5a"
1932         },
1933         {
1934                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
1935                 .sf_qop         = 0,
1936                 .sf_service     = SPTLRPC_SVC_INTG,
1937                 .sf_name        = "krb5i"
1938         },
1939         {
1940                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
1941                 .sf_qop         = 0,
1942                 .sf_service     = SPTLRPC_SVC_PRIV,
1943                 .sf_name        = "krb5p"
1944         },
1945 };
1946
1947 /*
1948  * currently we leave module owner NULL
1949  */
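/*
 * gm_oid is the DER encoding of the Kerberos 5 mechanism OID
 * 1.2.840.113554.1.2.2 (RFC 1964): 40*1 + 2 = 42 = \052; 840 =
 * \206\110; 113554 = \206\367\022; then the arcs 1, 2, 2. Nine bytes
 * in total, matching the length field below.
 */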
1950 static struct gss_api_mech gss_kerberos_mech = {
1951         .gm_owner       = NULL, /*THIS_MODULE, */
1952         .gm_name        = "krb5",
1953         .gm_oid         = (rawobj_t)
1954                                 {9, "\052\206\110\206\367\022\001\002\002"},
1955         .gm_ops         = &gss_kerberos_ops,
1956         .gm_sf_num      = 4,
1957         .gm_sfs         = gss_kerberos_sfs,
1958 };
1959
1960 int __init init_kerberos_module(void)
1961 {
1962         int status;
1963
1964         spin_lock_init(&krb5_seq_lock);
1965
1966         status = lgss_mech_register(&gss_kerberos_mech);
1967         if (status)
1968                 CERROR("Failed to register kerberos gss mechanism!\n");
1969         return status;
1970 }
1971
1972 void cleanup_kerberos_module(void)
1973 {
1974         lgss_mech_unregister(&gss_kerberos_mech);
1975 }