lustre/ptlrpc/gss/gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, Whamcloud, Inc.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #ifndef EXPORT_SYMTAB
52 # define EXPORT_SYMTAB
53 #endif
54 #define DEBUG_SUBSYSTEM S_SEC
55 #ifdef __KERNEL__
56 #include <linux/init.h>
57 #include <linux/module.h>
58 #include <linux/slab.h>
59 #include <linux/crypto.h>
60 #include <linux/mutex.h>
61 #else
62 #include <liblustre.h>
63 #endif
64
65 #include <obd.h>
66 #include <obd_class.h>
67 #include <obd_support.h>
68 #include <lustre/lustre_idl.h>
69 #include <lustre_net.h>
70 #include <lustre_import.h>
71 #include <lustre_sec.h>
72
73 #include "gss_err.h"
74 #include "gss_internal.h"
75 #include "gss_api.h"
76 #include "gss_asn1.h"
77 #include "gss_krb5.h"
78
79 static cfs_spinlock_t krb5_seq_lock;
80
81 struct krb5_enctype {
82         char           *ke_dispname;
83         char           *ke_enc_name;            /* linux tfm name */
84         char           *ke_hash_name;           /* linux tfm name */
85         int             ke_enc_mode;            /* linux tfm mode */
86         int             ke_hash_size;           /* checksum size */
87         int             ke_conf_size;           /* confounder size */
88         unsigned int    ke_hash_hmac:1;         /* is hmac? */
89 };
90
91 /*
92  * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
93  * encryption, but for now we simply do CBC with padding, because linux
94  * doesn't support CTS yet. this needs to be fixed in the future.
95  */
96 static struct krb5_enctype enctypes[] = {
97         [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
98                 "des-cbc-md5",
99                 "cbc(des)",
100                 "md5",
101                 0,
102                 16,
103                 8,
104                 0,
105         },
106         [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
107                 "des3-hmac-sha1",
108                 "cbc(des3_ede)",
109                 "hmac(sha1)",
110                 0,
111                 20,
112                 8,
113                 1,
114         },
115         [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
116                 "aes128-cts-hmac-sha1-96",
117                 "cbc(aes)",
118                 "hmac(sha1)",
119                 0,
120                 12,
121                 16,
122                 1,
123         },
124         [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
125                 "aes256-cts-hmac-sha1-96",
126                 "cbc(aes)",
127                 "hmac(sha1)",
128                 0,
129                 12,
130                 16,
131                 1,
132         },
133         [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
134                 "arcfour-hmac-md5",
135                 "ecb(arc4)",
136                 "hmac(md5)",
137                 0,
138                 16,
139                 8,
140                 1,
141         },
142 };
143
144 #define MAX_ENCTYPES    (sizeof(enctypes)/sizeof(struct krb5_enctype))
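/*
 * Editorial sketch, not part of the original source: the NOTE above matters
 * because CBC-with-padding and CTS yield different cipher text sizes. CTS
 * output has exactly the plain text length (once it is at least one block),
 * while CBC must round up to the block size. The hypothetical helper below
 * shows the CBC-side arithmetic this file relies on.
 */
#if 0
static inline int cbc_padded_len(int len, int blocksize)
{
        /* round len up to the next multiple of blocksize;
         * blocksize is assumed to be a power of two */
        return (len + blocksize - 1) & ~(blocksize - 1);
}
/* e.g. cbc_padded_len(25, 16) == 32, while CTS would emit 25 bytes */
#endif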
145
146 static const char * enctype2str(__u32 enctype)
147 {
148         if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
149                 return enctypes[enctype].ke_dispname;
150
151         return "unknown";
152 }
153
154 static
155 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
156 {
157         kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
158         if (kb->kb_tfm == NULL) {
159                 CERROR("failed to alloc tfm: %s, mode %d\n",
160                        alg_name, alg_mode);
161                 return -1;
162         }
163
164         if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
165                 CERROR("failed to set %s key, len %d\n",
166                        alg_name, kb->kb_key.len);
167                 return -1;
168         }
169
170         return 0;
171 }
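/*
 * Editorial sketch, not part of the original source: the typical keyblock
 * lifecycle as used in this file. kb_key must already hold the raw key
 * material (see get_keyblock() below); keyblock_init() then attaches a
 * kernel tfm and sets the key, and keyblock_free() releases both. The
 * driver function below is hypothetical.
 */
#if 0
static int example_keyblock_lifecycle(rawobj_t *raw_key)
{
        struct krb5_keyblock kb = { 0 };

        if (rawobj_dup(&kb.kb_key, raw_key))
                return -1;
        if (keyblock_init(&kb, "cbc(aes)", 0)) {    /* alloc tfm + setkey */
                rawobj_free(&kb.kb_key);
                return -1;
        }
        /* ... use kb.kb_tfm, e.g. with krb5_encrypt() ... */
        keyblock_free(&kb);                         /* frees key and tfm */
        return 0;
}
#endif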
172
173 static
174 int krb5_init_keys(struct krb5_ctx *kctx)
175 {
176         struct krb5_enctype *ke;
177
178         if (kctx->kc_enctype >= MAX_ENCTYPES ||
179             enctypes[kctx->kc_enctype].ke_hash_size == 0) {
180                 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
181                 return -1;
182         }
183
184         ke = &enctypes[kctx->kc_enctype];
185
186         /* the arc4 tfm is stateful; callers must alloc, use and free it on their own */
187         if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
188             keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
189                 return -1;
190
191         /* the hmac tfm is stateful; callers must alloc, use and free it on their own */
192         if (ke->ke_hash_hmac == 0 &&
193             keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
194                 return -1;
195         if (ke->ke_hash_hmac == 0 &&
196             keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
197                 return -1;
198
199         return 0;
200 }
201
202 static
203 void keyblock_free(struct krb5_keyblock *kb)
204 {
205         rawobj_free(&kb->kb_key);
206         if (kb->kb_tfm)
207                 ll_crypto_free_blkcipher(kb->kb_tfm);
208 }
209
210 static
211 int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
212 {
213         return rawobj_dup(&new->kb_key, &kb->kb_key);
214 }
215
216 static
217 int get_bytes(char **ptr, const char *end, void *res, int len)
218 {
219         char *p, *q;
220         p = *ptr;
221         q = p + len;
222         if (q > end || q < p)
223                 return -1;
224         memcpy(res, p, len);
225         *ptr = q;
226         return 0;
227 }
228
229 static
230 int get_rawobj(char **ptr, const char *end, rawobj_t *res)
231 {
232         char   *p, *q;
233         __u32   len;
234
235         p = *ptr;
236         if (get_bytes(&p, end, &len, sizeof(len)))
237                 return -1;
238
239         q = p + len;
240         if (q > end || q < p)
241                 return -1;
242
243         OBD_ALLOC_LARGE(res->data, len);
244         if (!res->data)
245                 return -1;
246
247         res->len = len;
248         memcpy(res->data, p, len);
249         *ptr = q;
250         return 0;
251 }
252
253 static
254 int get_keyblock(char **ptr, const char *end,
255                  struct krb5_keyblock *kb, __u32 keysize)
256 {
257         char *buf;
258
259         OBD_ALLOC_LARGE(buf, keysize);
260         if (buf == NULL)
261                 return -1;
262
263         if (get_bytes(ptr, end, buf, keysize)) {
264                 OBD_FREE_LARGE(buf, keysize);
265                 return -1;
266         }
267
268         kb->kb_key.len = keysize;
269         kb->kb_key.data = buf;
270         return 0;
271 }
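/*
 * Editorial note, not part of the original source: get_bytes(), get_rawobj()
 * and get_keyblock() together form a simple cursor-based parser over the
 * context blob handed down from user space. Each call advances *ptr and
 * fails if the read would run past 'end'. A hypothetical sketch:
 */
#if 0
static int example_parse(char *blob, int len)
{
        char    *p = blob, *end = blob + len;
        __u32    version;
        rawobj_t oid = RAWOBJ_EMPTY;

        if (get_bytes(&p, end, &version, sizeof(version)))
                return -1;                  /* fixed-size field */
        if (get_rawobj(&p, end, &oid))
                return -1;                  /* u32 length + payload */
        /* ... */
        rawobj_free(&oid);
        return 0;
}
#endif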
272
273 static
274 void delete_context_kerberos(struct krb5_ctx *kctx)
275 {
276         rawobj_free(&kctx->kc_mech_used);
277
278         keyblock_free(&kctx->kc_keye);
279         keyblock_free(&kctx->kc_keyi);
280         keyblock_free(&kctx->kc_keyc);
281 }
282
283 static
284 __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
285 {
286         unsigned int    tmp_uint, keysize;
287
288         /* seed_init flag */
289         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
290                 goto out_err;
291         kctx->kc_seed_init = (tmp_uint != 0);
292
293         /* seed */
294         if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
295                 goto out_err;
296
297         /* sign/seal algorithm identifiers, currently unused */
298         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
299             get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
300                 goto out_err;
301
302         /* end time */
303         if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
304                 goto out_err;
305
306         /* seq send */
307         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
308                 goto out_err;
309         kctx->kc_seq_send = tmp_uint;
310
311         /* mech oid */
312         if (get_rawobj(&p, end, &kctx->kc_mech_used))
313                 goto out_err;
314
315         /* old style enc/seq keys in format:
316          *   - enctype (u32)
317          *   - keysize (u32)
318          *   - keydata
319          * we decompose them to fit into the new context
320          */
321
322         /* enc key */
323         if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
324                 goto out_err;
325
326         if (get_bytes(&p, end, &keysize, sizeof(keysize)))
327                 goto out_err;
328
329         if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
330                 goto out_err;
331
332         /* seq key */
333         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
334             tmp_uint != kctx->kc_enctype)
335                 goto out_err;
336
337         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
338             tmp_uint != keysize)
339                 goto out_err;
340
341         if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
342                 goto out_err;
343
344         /* old style fallback */
345         if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
346                 goto out_err;
347
348         if (p != end)
349                 goto out_err;
350
351         CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
352         return 0;
353 out_err:
354         return GSS_S_FAILURE;
355 }
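/*
 * Editorial reference, reconstructed from the parsing code above (not an
 * authoritative format description): the rfc1964-style blob is laid out as
 *
 *   | seed_init (u32) | seed[] | sign alg (u32) | seal alg (u32) | endtime |
 *   | seq_send (u32) | mech oid (u32 len + data) | enctype (u32) |
 *   | keysize (u32) | enc key | enctype (u32) | keysize (u32) | seq key |
 *
 * with the enc/seq keys mapped onto kc_keye/kc_keyc and kc_keyi duplicated
 * from the seq key.
 */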
356
357 /* Flags for version 2 context flags */
358 #define KRB5_CTX_FLAG_INITIATOR         0x00000001
359 #define KRB5_CTX_FLAG_CFX               0x00000002
360 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
361
362 static
363 __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
364 {
365         unsigned int    tmp_uint, keysize;
366
367         /* end time */
368         if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
369                 goto out_err;
370
371         /* flags */
372         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
373                 goto out_err;
374
375         if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
376                 kctx->kc_initiate = 1;
377         if (tmp_uint & KRB5_CTX_FLAG_CFX)
378                 kctx->kc_cfx = 1;
379         if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
380                 kctx->kc_have_acceptor_subkey = 1;
381
382         /* seq send */
383         if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
384                 goto out_err;
385
386         /* enctype */
387         if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
388                 goto out_err;
389
390         /* size of each key */
391         if (get_bytes(&p, end, &keysize, sizeof(keysize)))
392                 goto out_err;
393
394         /* number of keys - should always be 3 */
395         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
396                 goto out_err;
397
398         if (tmp_uint != 3) {
399                 CERROR("Invalid number of keys: %u\n", tmp_uint);
400                 goto out_err;
401         }
402
403         /* ke */
404         if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
405                 goto out_err;
406         /* ki */
407         if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
408                 goto out_err;
409         /* kc */
410         if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
411                 goto out_err;
412
413         CDEBUG(D_SEC, "successfully imported v2 context\n");
414         return 0;
415 out_err:
416         return GSS_S_FAILURE;
417 }
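/*
 * Editorial reference, reconstructed from the parsing code above: the v2
 * (rfc4121-style) blob is laid out as
 *
 *   | endtime | flags (u32) | seq_send | enctype (u32) | keysize (u32) |
 *   | key count = 3 (u32) | ke | ki | kc |
 *
 * where ke/ki/kc are the encryption, integrity and checksum keys, each
 * keysize bytes long.
 */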
418
419 /*
420  * The whole purpose here is to keep the user level gss context parsing
421  * from nfs-utils as unchanged as we can. it is not quite mature yet,
422  * and many things are still unclear, like heimdal support etc.
423  */
424 static
425 __u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
426                                       struct gss_ctx *gctx)
427 {
428         struct krb5_ctx *kctx;
429         char            *p = (char *) inbuf->data;
430         char            *end = (char *) (inbuf->data + inbuf->len);
431         unsigned int     tmp_uint, rc;
432
433         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
434                 CERROR("failed to read version\n");
435                 return GSS_S_FAILURE;
436         }
437
438         /* versions 0 and 1 use the rfc1964 format, version 2 the rfc4121 format */
439         if (tmp_uint > 2) {
440                 CERROR("Invalid version %u\n", tmp_uint);
441                 return GSS_S_FAILURE;
442         }
443
444         OBD_ALLOC_PTR(kctx);
445         if (!kctx)
446                 return GSS_S_FAILURE;
447
448         if (tmp_uint == 0 || tmp_uint == 1) {
449                 kctx->kc_initiate = tmp_uint;
450                 rc = import_context_rfc1964(kctx, p, end);
451         } else {
452                 rc = import_context_rfc4121(kctx, p, end);
453         }
454
455         if (rc == 0)
456                 rc = krb5_init_keys(kctx);
457
458         if (rc) {
459                 delete_context_kerberos(kctx);
460                 OBD_FREE_PTR(kctx);
461
462                 return GSS_S_FAILURE;
463         }
464
465         gctx->internal_ctx_id = kctx;
466         return GSS_S_COMPLETE;
467 }
468
469 static
470 __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
471                                         struct gss_ctx *gctx_new)
472 {
473         struct krb5_ctx *kctx = gctx->internal_ctx_id;
474         struct krb5_ctx *knew;
475
476         OBD_ALLOC_PTR(knew);
477         if (!knew)
478                 return GSS_S_FAILURE;
479
480         knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
481         knew->kc_cfx = kctx->kc_cfx;
482         knew->kc_seed_init = kctx->kc_seed_init;
483         knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
484         knew->kc_endtime = kctx->kc_endtime;
485
486         memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
487         knew->kc_seq_send = kctx->kc_seq_recv;
488         knew->kc_seq_recv = kctx->kc_seq_send;
489         knew->kc_enctype = kctx->kc_enctype;
490
491         if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
492                 goto out_err;
493
494         if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
495                 goto out_err;
496         if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
497                 goto out_err;
498         if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
499                 goto out_err;
500         if (krb5_init_keys(knew))
501                 goto out_err;
502
503         gctx_new->internal_ctx_id = knew;
504         CDEBUG(D_SEC, "successfully copied reverse context\n");
505         return GSS_S_COMPLETE;
506
507 out_err:
508         delete_context_kerberos(knew);
509         OBD_FREE_PTR(knew);
510         return GSS_S_FAILURE;
511 }
512
513 static
514 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
515                                    unsigned long  *endtime)
516 {
517         struct krb5_ctx *kctx = gctx->internal_ctx_id;
518
519         *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
520         return GSS_S_COMPLETE;
521 }
522
523 static
524 void gss_delete_sec_context_kerberos(void *internal_ctx)
525 {
526         struct krb5_ctx *kctx = internal_ctx;
527
528         delete_context_kerberos(kctx);
529         OBD_FREE_PTR(kctx);
530 }
531
532 static
533 void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
534 {
535         sg->page = virt_to_page(ptr);
536         sg->offset = offset_in_page(ptr);
537         sg->length = len;
538 }
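/*
 * Editorial note, not part of the original source: buf_to_sg() pokes the
 * scatterlist fields directly, which matches the old kernel scatterlist
 * layout. On kernels that provide the sg accessor helpers, the equivalent
 * would be a single call:
 */
#if 0
static void buf_to_sg_modern(struct scatterlist *sg, void *ptr, int len)
{
        sg_init_one(sg, ptr, len);  /* sets page link, offset and length */
}
#endif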
539
540 static
541 __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
542                    int decrypt,
543                    void * iv,
544                    void * in,
545                    void * out,
546                    int length)
547 {
548         struct blkcipher_desc desc;
549         struct scatterlist    sg;
550         __u8 local_iv[16] = {0};
551         __u32 ret = -EINVAL;
552
553         LASSERT(tfm);
554         desc.tfm  = tfm;
555         desc.info = local_iv;
556         desc.flags= 0;
557
558         if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
559                 CERROR("length %d is not a multiple of blocksize %d\n",
560                        length, ll_crypto_blkcipher_blocksize(tfm));
561                 goto out;
562         }
563
564         if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
565                 CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
566                 goto out;
567         }
568
569         if (iv)
570                 memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
571
572         memcpy(out, in, length);
573         buf_to_sg(&sg, out, length);
574
575         if (decrypt)
576                 ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
577         else
578                 ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
579
580 out:
581         return(ret);
582 }
583
584 #ifdef HAVE_ASYNC_BLOCK_CIPHER
585
586 static inline
587 int krb5_digest_hmac(struct ll_crypto_hash *tfm,
588                      rawobj_t *key,
589                      struct krb5_header *khdr,
590                      int msgcnt, rawobj_t *msgs,
591                      int iovcnt, lnet_kiov_t *iovs,
592                      rawobj_t *cksum)
593 {
594         struct hash_desc   desc;
595         struct scatterlist sg[1];
596         int                i;
597
598         ll_crypto_hash_setkey(tfm, key->data, key->len);
599         desc.tfm  = tfm;
600         desc.flags= 0;
601
602         ll_crypto_hash_init(&desc);
603
604         for (i = 0; i < msgcnt; i++) {
605                 if (msgs[i].len == 0)
606                         continue;
607                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
608                 ll_crypto_hash_update(&desc, sg, msgs[i].len);
609         }
610
611         for (i = 0; i < iovcnt; i++) {
612                 if (iovs[i].kiov_len == 0)
613                         continue;
614                 sg[0].page = iovs[i].kiov_page;
615                 sg[0].offset = iovs[i].kiov_offset;
616                 sg[0].length = iovs[i].kiov_len;
617                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
618         }
619
620         if (khdr) {
621                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
622                 ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
623         }
624
625         return ll_crypto_hash_final(&desc, cksum->data);
626 }
627
628 #else /* ! HAVE_ASYNC_BLOCK_CIPHER */
629
630 static inline
631 int krb5_digest_hmac(struct ll_crypto_hash *tfm,
632                      rawobj_t *key,
633                      struct krb5_header *khdr,
634                      int msgcnt, rawobj_t *msgs,
635                      int iovcnt, lnet_kiov_t *iovs,
636                      rawobj_t *cksum)
637 {
638         struct scatterlist sg[1];
639         __u32              keylen = key->len, i;
640
641         crypto_hmac_init(tfm, key->data, &keylen);
642
643         for (i = 0; i < msgcnt; i++) {
644                 if (msgs[i].len == 0)
645                         continue;
646                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
647                 crypto_hmac_update(tfm, sg, 1);
648         }
649
650         for (i = 0; i < iovcnt; i++) {
651                 if (iovs[i].kiov_len == 0)
652                         continue;
653                 sg[0].page = iovs[i].kiov_page;
654                 sg[0].offset = iovs[i].kiov_offset;
655                 sg[0].length = iovs[i].kiov_len;
656                 crypto_hmac_update(tfm, sg, 1);
657         }
658
659         if (khdr) {
660                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
661                 crypto_hmac_update(tfm, sg, 1);
662         }
663
664         crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
665         return 0;
666 }
667
668 #endif /* HAVE_ASYNC_BLOCK_CIPHER */
669
670 static inline
671 int krb5_digest_norm(struct ll_crypto_hash *tfm,
672                      struct krb5_keyblock *kb,
673                      struct krb5_header *khdr,
674                      int msgcnt, rawobj_t *msgs,
675                      int iovcnt, lnet_kiov_t *iovs,
676                      rawobj_t *cksum)
677 {
678         struct hash_desc   desc;
679         struct scatterlist sg[1];
680         int                i;
681
682         LASSERT(kb->kb_tfm);
683         desc.tfm  = tfm;
684         desc.flags= 0;
685
686         ll_crypto_hash_init(&desc);
687
688         for (i = 0; i < msgcnt; i++) {
689                 if (msgs[i].len == 0)
690                         continue;
691                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
692                 ll_crypto_hash_update(&desc, sg, msgs[i].len);
693         }
694
695         for (i = 0; i < iovcnt; i++) {
696                 if (iovs[i].kiov_len == 0)
697                         continue;
698                 sg[0].page = iovs[i].kiov_page;
699                 sg[0].offset = iovs[i].kiov_offset;
700                 sg[0].length = iovs[i].kiov_len;
701                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
702         }
703
704         if (khdr) {
705                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
706                 ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
707         }
708
709         ll_crypto_hash_final(&desc, cksum->data);
710
711         return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
712                             cksum->data, cksum->len);
713 }
714
715 /*
716  * compute (keyed/keyless) checksum against the plain text which appended
717  * with krb5 wire token header.
718  */
719 static
720 __s32 krb5_make_checksum(__u32 enctype,
721                          struct krb5_keyblock *kb,
722                          struct krb5_header *khdr,
723                          int msgcnt, rawobj_t *msgs,
724                          int iovcnt, lnet_kiov_t *iovs,
725                          rawobj_t *cksum)
726 {
727         struct krb5_enctype   *ke = &enctypes[enctype];
728         struct ll_crypto_hash *tfm;
729         __u32                  code = GSS_S_FAILURE;
730         int                    rc;
731
732         if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
733                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
734                 return GSS_S_FAILURE;
735         }
736
737         cksum->len = ll_crypto_hash_digestsize(tfm);
738         OBD_ALLOC_LARGE(cksum->data, cksum->len);
739         if (!cksum->data) {
740                 cksum->len = 0;
741                 goto out_tfm;
742         }
743
744         if (ke->ke_hash_hmac)
745                 rc = krb5_digest_hmac(tfm, &kb->kb_key,
746                                       khdr, msgcnt, msgs, iovcnt, iovs, cksum);
747         else
748                 rc = krb5_digest_norm(tfm, kb,
749                                       khdr, msgcnt, msgs, iovcnt, iovs, cksum);
750
751         if (rc == 0)
752                 code = GSS_S_COMPLETE;
753 out_tfm:
754         ll_crypto_free_hash(tfm);
755         return code;
756 }
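/*
 * Editorial sketch, not part of the original source, mirroring how the
 * callers below use krb5_make_checksum(): the checksum buffer is allocated
 * inside the function, only the trailing ke_hash_size bytes end up in the
 * token, and the caller must release the buffer with rawobj_free().
 */
#if 0
static int example_mic(struct krb5_ctx *kctx, rawobj_t *msg)
{
        rawobj_t cksum = RAWOBJ_EMPTY;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               NULL /* no krb5 header */, 1, msg,
                               0, NULL, &cksum))
                return -1;
        /* ... copy the trailing ke_hash_size bytes into the token ... */
        rawobj_free(&cksum);
        return 0;
}
#endif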
757
758 static void fill_krb5_header(struct krb5_ctx *kctx,
759                              struct krb5_header *khdr,
760                              int privacy)
761 {
762         unsigned char acceptor_flag;
763
764         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
765
766         if (privacy) {
767                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
768                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
769                 khdr->kh_ec = cpu_to_be16(0);
770                 khdr->kh_rrc = cpu_to_be16(0);
771         } else {
772                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
773                 khdr->kh_flags = acceptor_flag;
774                 khdr->kh_ec = cpu_to_be16(0xffff);
775                 khdr->kh_rrc = cpu_to_be16(0xffff);
776         }
777
778         khdr->kh_filler = 0xff;
779         cfs_spin_lock(&krb5_seq_lock);
780         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
781         cfs_spin_unlock(&krb5_seq_lock);
782 }
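/*
 * Editorial reference: fill_krb5_header() populates the 16-byte token
 * header of RFC 4121 section 4.2.6:
 *
 *   | tok_id (2) | flags (1) | filler 0xff (1) | ec (2) | rrc (2) | seq (8) |
 *
 * wrap tokens start with EC = RRC = 0 since we never rotate the cipher
 * text; for MIC tokens the EC/RRC positions are simply four more 0xff
 * filler bytes, which is why they are set to 0xffff above.
 */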
783
784 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
785                                 struct krb5_header *khdr,
786                                 int privacy)
787 {
788         unsigned char acceptor_flag;
789         __u16         tok_id, ec_rrc;
790
791         acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
792
793         if (privacy) {
794                 tok_id = KG_TOK_WRAP_MSG;
795                 ec_rrc = 0x0;
796         } else {
797                 tok_id = KG_TOK_MIC_MSG;
798                 ec_rrc = 0xffff;
799         }
800
801         /* sanity checks */
802         if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
803                 CERROR("bad token id\n");
804                 return GSS_S_DEFECTIVE_TOKEN;
805         }
806         if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
807                 CERROR("bad direction flag\n");
808                 return GSS_S_BAD_SIG;
809         }
810         if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
811                 CERROR("missing confidential flag\n");
812                 return GSS_S_BAD_SIG;
813         }
814         if (khdr->kh_filler != 0xff) {
815                 CERROR("bad filler\n");
816                 return GSS_S_DEFECTIVE_TOKEN;
817         }
818         if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
819             be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
820                 CERROR("bad EC or RRC\n");
821                 return GSS_S_DEFECTIVE_TOKEN;
822         }
823         return GSS_S_COMPLETE;
824 }
825
826 static
827 __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
828                            int msgcnt,
829                            rawobj_t *msgs,
830                            int iovcnt,
831                            lnet_kiov_t *iovs,
832                            rawobj_t *token)
833 {
834         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
835         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
836         struct krb5_header  *khdr;
837         rawobj_t             cksum = RAWOBJ_EMPTY;
838
839         /* fill krb5 header */
840         LASSERT(token->len >= sizeof(*khdr));
841         khdr = (struct krb5_header *) token->data;
842         fill_krb5_header(kctx, khdr, 0);
843
844         /* checksum */
845         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
846                                khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
847                 return GSS_S_FAILURE;
848
849         LASSERT(cksum.len >= ke->ke_hash_size);
850         LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
851         memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
852                ke->ke_hash_size);
853
854         token->len = sizeof(*khdr) + ke->ke_hash_size;
855         rawobj_free(&cksum);
856         return GSS_S_COMPLETE;
857 }
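/*
 * Editorial note, not part of the original source: the MIC token emitted
 * above is simply
 *
 *   | krb5 header (16 bytes) | trailing ke_hash_size bytes of checksum |
 *
 * so e.g. for aes128-cts (ke_hash_size = 12) the token is 28 bytes.
 */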
858
859 static
860 __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
861                               int msgcnt,
862                               rawobj_t *msgs,
863                               int iovcnt,
864                               lnet_kiov_t *iovs,
865                               rawobj_t *token)
866 {
867         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
868         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
869         struct krb5_header  *khdr;
870         rawobj_t             cksum = RAWOBJ_EMPTY;
871         __u32                major;
872
873         if (token->len < sizeof(*khdr)) {
874                 CERROR("short signature: %u\n", token->len);
875                 return GSS_S_DEFECTIVE_TOKEN;
876         }
877
878         khdr = (struct krb5_header *) token->data;
879
880         major = verify_krb5_header(kctx, khdr, 0);
881         if (major != GSS_S_COMPLETE) {
882                 CERROR("bad krb5 header\n");
883                 return major;
884         }
885
886         if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
887                 CERROR("short signature: %u, require %d\n",
888                        token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
889                 return GSS_S_FAILURE;
890         }
891
892         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
893                                khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
894                 CERROR("failed to make checksum\n");
895                 return GSS_S_FAILURE;
896         }
897
898         LASSERT(cksum.len >= ke->ke_hash_size);
899         if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
900                    ke->ke_hash_size)) {
901                 CERROR("checksum mismatch\n");
902                 rawobj_free(&cksum);
903                 return GSS_S_BAD_SIG;
904         }
905
906         rawobj_free(&cksum);
907         return GSS_S_COMPLETE;
908 }
909
910 static
911 int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
912 {
913         int padding;
914
915         padding = (blocksize - (msg->len & (blocksize - 1))) &
916                   (blocksize - 1);
917         if (!padding)
918                 return 0;
919
920         if (msg->len + padding > msg_buflen) {
921                 CERROR("bufsize %u too small: datalen %u, padding %u\n",
922                         msg_buflen, msg->len, padding);
923                 return -EINVAL;
924         }
925
926         memset(msg->data + msg->len, padding, padding);
927         msg->len += padding;
928         return 0;
929 }
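/*
 * Editorial worked example, not part of the original source: with
 * blocksize 16 and msg->len 25, padding = (16 - (25 & 15)) & 15 = 7,
 * so seven bytes of value 0x07 are appended and msg->len becomes 32.
 * the pad bytes carry the pad length, PKCS#5/#7 style, and blocksize
 * must be a power of two for the mask arithmetic to work.
 */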
930
931 static
932 int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
933                          int mode_ecb,
934                          int inobj_cnt,
935                          rawobj_t *inobjs,
936                          rawobj_t *outobj,
937                          int enc)
938 {
939         struct blkcipher_desc desc;
940         struct scatterlist    src, dst;
941         __u8                  local_iv[16] = {0}, *buf;
942         __u32                 datalen = 0;
943         int                   i, rc;
944         ENTRY;
945
946         buf = outobj->data;
947         desc.tfm  = tfm;
948         desc.info = local_iv;
949         desc.flags = 0;
950
951         for (i = 0; i < inobj_cnt; i++) {
952                 LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
953
954                 buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
955                 buf_to_sg(&dst, buf, outobj->len - datalen);
956
957                 if (mode_ecb) {
958                         if (enc)
959                                 rc = ll_crypto_blkcipher_encrypt(
960                                         &desc, &dst, &src, src.length);
961                         else
962                                 rc = ll_crypto_blkcipher_decrypt(
963                                         &desc, &dst, &src, src.length);
964                 } else {
965                         if (enc)
966                                 rc = ll_crypto_blkcipher_encrypt_iv(
967                                         &desc, &dst, &src, src.length);
968                         else
969                                 rc = ll_crypto_blkcipher_decrypt_iv(
970                                         &desc, &dst, &src, src.length);
971                 }
972
973                 if (rc) {
974                         CERROR("encrypt error %d\n", rc);
975                         RETURN(rc);
976                 }
977
978                 datalen += inobjs[i].len;
979                 buf += inobjs[i].len;
980         }
981
982         outobj->len = datalen;
983         RETURN(0);
984 }
985
986 /*
987  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
988  */
989 static
990 int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
991                       struct krb5_header *khdr,
992                       char *confounder,
993                       struct ptlrpc_bulk_desc *desc,
994                       rawobj_t *cipher,
995                       int adj_nob)
996 {
997         struct blkcipher_desc   ciph_desc;
998         __u8                    local_iv[16] = {0};
999         struct scatterlist      src, dst;
1000         int                     blocksize, i, rc, nob = 0;
1001
1002         LASSERT(desc->bd_iov_count);
1003         LASSERT(desc->bd_enc_iov);
1004
1005         blocksize = ll_crypto_blkcipher_blocksize(tfm);
1006         LASSERT(blocksize > 1);
1007         LASSERT(cipher->len == blocksize + sizeof(*khdr));
1008
1009         ciph_desc.tfm  = tfm;
1010         ciph_desc.info = local_iv;
1011         ciph_desc.flags = 0;
1012
1013         /* encrypt confounder */
1014         buf_to_sg(&src, confounder, blocksize);
1015         buf_to_sg(&dst, cipher->data, blocksize);
1016
1017         rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
1018         if (rc) {
1019                 CERROR("failed to encrypt confounder: %d\n", rc);
1020                 return rc;
1021         }
1022
1023         /* encrypt clear pages */
1024         for (i = 0; i < desc->bd_iov_count; i++) {
1025                 src.page = desc->bd_iov[i].kiov_page;
1026                 src.offset = desc->bd_iov[i].kiov_offset;
1027                 src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
1028                              (~(blocksize - 1));
1029
1030                 if (adj_nob)
1031                         nob += src.length;
1032
1033                 dst.page = desc->bd_enc_iov[i].kiov_page;
1034                 dst.offset = src.offset;
1035                 dst.length = src.length;
1036
1037                 desc->bd_enc_iov[i].kiov_offset = dst.offset;
1038                 desc->bd_enc_iov[i].kiov_len = dst.length;
1039
1040                 rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
1041                                                     src.length);
1042                 if (rc) {
1043                         CERROR("failed to encrypt page: %d\n", rc);
1044                         return rc;
1045                 }
1046         }
1047
1048         /* encrypt krb5 header */
1049         buf_to_sg(&src, khdr, sizeof(*khdr));
1050         buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
1051
1052         rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
1053                                             &dst, &src, sizeof(*khdr));
1054         if (rc) {
1055                 CERROR("failed to encrypt krb5 header: %d\n", rc);
1056                 return rc;
1057         }
1058
1059         if (adj_nob)
1060                 desc->bd_nob = nob;
1061
1062         return 0;
1063 }
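/*
 * Editorial note, not part of the original source: the per-page length
 * round-up above uses the usual power-of-two trick
 *
 *   (len + blocksize - 1) & ~(blocksize - 1)
 *
 * e.g. with blocksize 16 a 100-byte kiov is encrypted as 112 bytes (seven
 * full blocks); the extra cipher text is what the adj_nob logic in
 * krb5_decrypt_bulk() below has to cope with.
 */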
1064
1065 /*
1066  * desc->bd_nob_transferred is the size of the cipher text received.
1067  * desc->bd_nob is the target size of the plain text.
1068  *
1069  * if adj_nob != 0, we adjust each page's kiov_len to the actual
1070  * plain text size.
1071  * - for client read: we don't know the data size of each page, so
1072  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
1073  *   might be smaller, so we need to adjust it according to
1074  *   bd_enc_iov[]->kiov_len. this means we DO NOT support a server
1075  *   sending an odd-size chunk of data in any page but the last one.
1076  * - for server write: we know exactly the data size of each page, so
1077  *   kiov_len is already accurate and must not be adjusted at all. also,
1078  *   bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len), which
1079  *   should have been done by prep_bulk().
1080  */
1081 static
1082 int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
1083                       struct krb5_header *khdr,
1084                       struct ptlrpc_bulk_desc *desc,
1085                       rawobj_t *cipher,
1086                       rawobj_t *plain,
1087                       int adj_nob)
1088 {
1089         struct blkcipher_desc   ciph_desc;
1090         __u8                    local_iv[16] = {0};
1091         struct scatterlist      src, dst;
1092         int                     ct_nob = 0, pt_nob = 0;
1093         int                     blocksize, i, rc;
1094
1095         LASSERT(desc->bd_iov_count);
1096         LASSERT(desc->bd_enc_iov);
1097         LASSERT(desc->bd_nob_transferred);
1098
1099         blocksize = ll_crypto_blkcipher_blocksize(tfm);
1100         LASSERT(blocksize > 1);
1101         LASSERT(cipher->len == blocksize + sizeof(*khdr));
1102
1103         ciph_desc.tfm  = tfm;
1104         ciph_desc.info = local_iv;
1105         ciph_desc.flags = 0;
1106
1107         if (desc->bd_nob_transferred % blocksize) {
1108                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
1109                 return -EPROTO;
1110         }
1111
1112         /* decrypt head (confounder) */
1113         buf_to_sg(&src, cipher->data, blocksize);
1114         buf_to_sg(&dst, plain->data, blocksize);
1115
1116         rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
1117         if (rc) {
1118                 CERROR("failed to decrypt confounder: %d\n", rc);
1119                 return rc;
1120         }
1121
1122         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
1123              i++) {
1124                 if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
1125                     desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
1126                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
1127                                i, desc->bd_enc_iov[i].kiov_offset,
1128                                desc->bd_enc_iov[i].kiov_len, blocksize);
1129                         return -EFAULT;
1130                 }
1131
1132                 if (adj_nob) {
1133                         if (ct_nob + desc->bd_enc_iov[i].kiov_len >
1134                             desc->bd_nob_transferred)
1135                                 desc->bd_enc_iov[i].kiov_len =
1136                                         desc->bd_nob_transferred - ct_nob;
1137
1138                         desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
1139                         if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
1140                                 desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
1141                 } else {
1142                         /* this should be guaranteed by LNET */
1143                         LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
1144                                 desc->bd_nob_transferred);
1145                         LASSERT(desc->bd_iov[i].kiov_len <=
1146                                 desc->bd_enc_iov[i].kiov_len);
1147                 }
1148
1149                 if (desc->bd_enc_iov[i].kiov_len == 0)
1150                         continue;
1151
1152                 src.page = desc->bd_enc_iov[i].kiov_page;
1153                 src.offset = desc->bd_enc_iov[i].kiov_offset;
1154                 src.length = desc->bd_enc_iov[i].kiov_len;
1155
1156                 dst = src;
1157                 if (desc->bd_iov[i].kiov_len % blocksize == 0)
1158                         dst.page = desc->bd_iov[i].kiov_page;
1159
1160                 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
1161                                                     src.length);
1162                 if (rc) {
1163                         CERROR("failed to decrypt page: %d\n", rc);
1164                         return rc;
1165                 }
1166
1167                 if (desc->bd_iov[i].kiov_len % blocksize != 0) {
1168                         memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
1169                                desc->bd_iov[i].kiov_offset,
1170                                cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
1171                                desc->bd_iov[i].kiov_offset,
1172                                desc->bd_iov[i].kiov_len);
1173                 }
1174
1175                 ct_nob += desc->bd_enc_iov[i].kiov_len;
1176                 pt_nob += desc->bd_iov[i].kiov_len;
1177         }
1178
1179         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
1180                 CERROR("%d cipher text transferred but only %d decrypted\n",
1181                        desc->bd_nob_transferred, ct_nob);
1182                 return -EFAULT;
1183         }
1184
1185         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
1186                 CERROR("%d plain text expected but only %d received\n",
1187                        desc->bd_nob, pt_nob);
1188                 return -EFAULT;
1189         }
1190
1191         /* if needed, zero out the remaining unused iovs */
1192         if (adj_nob)
1193                 while (i < desc->bd_iov_count)
1194                         desc->bd_iov[i++].kiov_len = 0;
1195
1196         /* decrypt tail (krb5 header) */
1197         buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
1198         buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
1199
1200         rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
1201                                             &dst, &src, sizeof(*khdr));
1202         if (rc) {
1203                 CERROR("failed to decrypt tail: %d\n", rc);
1204                 return rc;
1205         }
1206
1207         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
1208                 CERROR("krb5 header doesn't match\n");
1209                 return -EACCES;
1210         }
1211
1212         return 0;
1213 }
1214
1215 static
1216 __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
1217                         rawobj_t *gsshdr,
1218                         rawobj_t *msg,
1219                         int msg_buflen,
1220                         rawobj_t *token)
1221 {
1222         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1223         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1224         struct krb5_header  *khdr;
1225         int                  blocksize;
1226         rawobj_t             cksum = RAWOBJ_EMPTY;
1227         rawobj_t             data_desc[3], cipher;
1228         __u8                 conf[GSS_MAX_CIPHER_BLOCK];
1229         int                  rc = 0;
1230
1231         LASSERT(ke);
1232         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
1233         LASSERT(kctx->kc_keye.kb_tfm == NULL ||
1234                 ke->ke_conf_size >=
1235                 ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
1236
1237         /*
1238          * final token format:
1239          * ---------------------------------------------------
1240          * | krb5 header | cipher text | checksum (16 bytes) |
1241          * ---------------------------------------------------
1242          */
1243
1244         /* fill krb5 header */
1245         LASSERT(token->len >= sizeof(*khdr));
1246         khdr = (struct krb5_header *) token->data;
1247         fill_krb5_header(kctx, khdr, 1);
1248
1249         /* generate confounder */
1250         cfs_get_random_bytes(conf, ke->ke_conf_size);
1251
1252         /* get the encryption blocksize. note kc_keye might not be associated
1253          * with a tfm; currently that is only the case for arcfour-hmac */
1254         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1255                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1256                 blocksize = 1;
1257         } else {
1258                 LASSERT(kctx->kc_keye.kb_tfm);
1259                 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1260         }
1261         LASSERT(blocksize <= ke->ke_conf_size);
1262
1263         /* padding the message */
1264         if (add_padding(msg, msg_buflen, blocksize))
1265                 return GSS_S_FAILURE;
1266
1267         /*
1268          * clear text layout for checksum:
1269          * ------------------------------------------------------
1270          * | confounder | gss header | clear msgs | krb5 header |
1271          * ------------------------------------------------------
1272          */
1273         data_desc[0].data = conf;
1274         data_desc[0].len = ke->ke_conf_size;
1275         data_desc[1].data = gsshdr->data;
1276         data_desc[1].len = gsshdr->len;
1277         data_desc[2].data = msg->data;
1278         data_desc[2].len = msg->len;
1279
1280         /* compute checksum */
1281         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1282                                khdr, 3, data_desc, 0, NULL, &cksum))
1283                 return GSS_S_FAILURE;
1284         LASSERT(cksum.len >= ke->ke_hash_size);
1285
1286         /*
1287          * clear text layout for encryption:
1288          * -----------------------------------------
1289          * | confounder | clear msgs | krb5 header |
1290          * -----------------------------------------
1291          */
1292         data_desc[0].data = conf;
1293         data_desc[0].len = ke->ke_conf_size;
1294         data_desc[1].data = msg->data;
1295         data_desc[1].len = msg->len;
1296         data_desc[2].data = (__u8 *) khdr;
1297         data_desc[2].len = sizeof(*khdr);
1298
1299         /* cipher text is written directly in place */
1300         cipher.data = (__u8 *) (khdr + 1);
1301         cipher.len = token->len - sizeof(*khdr);
1302         LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
1303
1304         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1305                 rawobj_t                 arc4_keye;
1306                 struct ll_crypto_cipher *arc4_tfm;
1307
1308                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1309                                        NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1310                         CERROR("failed to obtain arc4 enc key\n");
1311                         GOTO(arc4_out, rc = -EACCES);
1312                 }
1313
1314                 arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1315                 if (arc4_tfm == NULL) {
1316                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1317                         GOTO(arc4_out_key, rc = -EACCES);
1318                 }
1319
1320                 if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
1321                                                arc4_keye.len)) {
1322                         CERROR("failed to set arc4 key, len %d\n",
1323                                arc4_keye.len);
1324                         GOTO(arc4_out_tfm, rc = -EACCES);
1325                 }
1326
1327                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1328                                           3, data_desc, &cipher, 1);
1329 arc4_out_tfm:
1330                 ll_crypto_free_blkcipher(arc4_tfm);
1331 arc4_out_key:
1332                 rawobj_free(&arc4_keye);
1333 arc4_out:
1334                 do {} while(0); /* just to avoid compile warning */
1335         } else {
1336                 rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1337                                           3, data_desc, &cipher, 1);
1338         }
1339
1340         if (rc != 0) {
1341                 rawobj_free(&cksum);
1342                 return GSS_S_FAILURE;
1343         }
1344
1345         /* fill in checksum */
1346         LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
1347         memcpy((char *)(khdr + 1) + cipher.len,
1348                cksum.data + cksum.len - ke->ke_hash_size,
1349                ke->ke_hash_size);
1350         rawobj_free(&cksum);
1351
1352         /* final token length */
1353         token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
1354         return GSS_S_COMPLETE;
1355 }
1356
1357 static
1358 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1359                              struct ptlrpc_bulk_desc *desc)
1360 {
1361         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1362         int                  blocksize, i;
1363
1364         LASSERT(desc->bd_iov_count);
1365         LASSERT(desc->bd_enc_iov);
1366         LASSERT(kctx->kc_keye.kb_tfm);
1367
1368         blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1369
1370         for (i = 0; i < desc->bd_iov_count; i++) {
1371                 LASSERT(desc->bd_enc_iov[i].kiov_page);
1372                 /*
1373                  * offset should always start at page boundary of either
1374                  * client or server side.
1375                  */
1376                 if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
1377                         CERROR("odd offset %d in page %d\n",
1378                                desc->bd_iov[i].kiov_offset, i);
1379                         return GSS_S_FAILURE;
1380                 }
1381
1382                 desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
1383                 desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
1384                                                 blocksize - 1) & (~(blocksize - 1));
1385         }
1386
1387         return GSS_S_COMPLETE;
1388 }
1389
1390 static
1391 __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
1392                              struct ptlrpc_bulk_desc *desc,
1393                              rawobj_t *token, int adj_nob)
1394 {
1395         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1396         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1397         struct krb5_header  *khdr;
1398         int                  blocksize;
1399         rawobj_t             cksum = RAWOBJ_EMPTY;
1400         rawobj_t             data_desc[1], cipher;
1401         __u8                 conf[GSS_MAX_CIPHER_BLOCK];
1402         int                  rc = 0;
1403
1404         LASSERT(ke);
1405         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
1406
1407         /*
1408          * final token format:
1409          * --------------------------------------------------
1410          * | krb5 header | head/tail cipher text | checksum |
1411          * --------------------------------------------------
1412          */
1413
1414         /* fill krb5 header */
1415         LASSERT(token->len >= sizeof(*khdr));
1416         khdr = (struct krb5_header *) token->data;
1417         fill_krb5_header(kctx, khdr, 1);
1418
1419         /* generate confounder */
1420         cfs_get_random_bytes(conf, ke->ke_conf_size);
1421
1422         /* get the encryption blocksize. note kc_keye might not be associated
1423          * with a tfm; currently that is only the case for arcfour-hmac */
1424         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1425                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1426                 blocksize = 1;
1427         } else {
1428                 LASSERT(kctx->kc_keye.kb_tfm);
1429                 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1430         }
1431
1432         /*
1433          * we assume the size of krb5_header (16 bytes) is a multiple of the
1434          * blocksize. the bulk token size is then exactly (sizeof(krb5_header) +
1435          * blocksize + sizeof(krb5_header) + hashsize)
1436          */
1437         LASSERT(blocksize <= ke->ke_conf_size);
1438         LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1439         LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
1440
1441         /*
1442          * clear text layout for checksum:
1443          * ------------------------------------------
1444          * | confounder | clear pages | krb5 header |
1445          * ------------------------------------------
1446          */
1447         data_desc[0].data = conf;
1448         data_desc[0].len = ke->ke_conf_size;
1449
1450         /* compute checksum */
1451         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1452                                khdr, 1, data_desc,
1453                                desc->bd_iov_count, desc->bd_iov,
1454                                &cksum))
1455                 return GSS_S_FAILURE;
1456         LASSERT(cksum.len >= ke->ke_hash_size);
1457
1458         /*
1459          * clear text layout for encryption:
1460          * ------------------------------------------
1461          * | confounder | clear pages | krb5 header |
1462          * ------------------------------------------
1463          *        |              |             |
1464          *        ----------  (cipher pages)   |
1465          * result token:   |                   |
1466          * -------------------------------------------
1467          * | krb5 header | cipher text | cipher text |
1468          * -------------------------------------------
1469          */
1470         data_desc[0].data = conf;
1471         data_desc[0].len = ke->ke_conf_size;
1472
1473         cipher.data = (__u8 *) (khdr + 1);
1474         cipher.len = blocksize + sizeof(*khdr);
1475
1476         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1477                 LBUG();
1478                 rc = 0;
1479         } else {
1480                 rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1481                                        conf, desc, &cipher, adj_nob);
1482         }
1483
1484         if (rc != 0) {
1485                 rawobj_free(&cksum);
1486                 return GSS_S_FAILURE;
1487         }
1488
1489         /* fill in checksum */
1490         LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
1491         memcpy((char *)(khdr + 1) + cipher.len,
1492                cksum.data + cksum.len - ke->ke_hash_size,
1493                ke->ke_hash_size);
1494         rawobj_free(&cksum);
1495
1496         /* final token length */
1497         token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
1498         return GSS_S_COMPLETE;
1499 }
1500
1501 static
1502 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1503                           rawobj_t        *gsshdr,
1504                           rawobj_t        *token,
1505                           rawobj_t        *msg)
1506 {
1507         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1508         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1509         struct krb5_header  *khdr;
1510         unsigned char       *tmpbuf;
1511         int                  blocksize, bodysize;
1512         rawobj_t             cksum = RAWOBJ_EMPTY;
1513         rawobj_t             cipher_in, plain_out;
1514         rawobj_t             hash_objs[3];
1515         int                  rc = 0;
1516         __u32                major;
1517
1518         LASSERT(ke);
1519
1520         if (token->len < sizeof(*khdr)) {
1521                 CERROR("short signature: %u\n", token->len);
1522                 return GSS_S_DEFECTIVE_TOKEN;
1523         }
1524
1525         khdr = (struct krb5_header *) token->data;
1526
1527         major = verify_krb5_header(kctx, khdr, 1);
1528         if (major != GSS_S_COMPLETE) {
1529                 CERROR("bad krb5 header\n");
1530                 return major;
1531         }
1532
1533         /* block size: arcfour is a stream cipher, so use block size 1 */
1534         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1535                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1536                 blocksize = 1;
1537         } else {
1538                 LASSERT(kctx->kc_keye.kb_tfm);
1539                 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1540         }
1541
1542         /* expected token layout:
1543          * ----------------------------------------
1544          * | krb5 header | cipher text | checksum |
1545          * ----------------------------------------
1546          */
1547         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1548
1549         if (bodysize % blocksize) {
1550                 CERROR("odd bodysize %d\n", bodysize);
1551                 return GSS_S_DEFECTIVE_TOKEN;
1552         }
1553
1554         if (bodysize <= (int)(ke->ke_conf_size + sizeof(*khdr))) {
1555                 CERROR("incomplete token: bodysize %d\n", bodysize);
1556                 return GSS_S_DEFECTIVE_TOKEN;
1557         }
1558
1559         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1560                 CERROR("buffer too small: %u, require %d\n",
1561                        msg->len, bodysize - ke->ke_conf_size);
1562                 return GSS_S_FAILURE;
1563         }
1564
1565         /* decrypting */
1566         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1567         if (!tmpbuf)
1568                 return GSS_S_FAILURE;
1569
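        /* assume failure; major becomes GSS_S_COMPLETE only after the
         * embedded krb5 header and the checksum have both been verified */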
1570         major = GSS_S_FAILURE;
1571
1572         cipher_in.data = (__u8 *) (khdr + 1);
1573         cipher_in.len = bodysize;
1574         plain_out.data = tmpbuf;
1575         plain_out.len = bodysize;
1576
1577         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1578                 rawobj_t                 arc4_keye;
1579                 struct ll_crypto_cipher *arc4_tfm;
1580
1581                 cksum.data = token->data + token->len - ke->ke_hash_size;
1582                 cksum.len = ke->ke_hash_size;
1583
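                /*
                 * derive the arcfour encryption key by HMAC-ing the token
                 * checksum with the integrity key, mirroring the arcfour-hmac
                 * key usage scheme of RFC 4757.
                 */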
1584                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1585                                        NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1586                         CERROR("failed to obtain arc4 enc key\n");
1587                         GOTO(arc4_out, rc = -EACCES);
1588                 }
1589
1590                 arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1591                 if (arc4_tfm == NULL) {
1592                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1593                         GOTO(arc4_out_key, rc = -EACCES);
1594                 }
1595
1596                 if (ll_crypto_blkcipher_setkey(arc4_tfm,
1597                                          arc4_keye.data, arc4_keye.len)) {
1598                         CERROR("failed to set arc4 key, len %d\n",
1599                                arc4_keye.len);
1600                         GOTO(arc4_out_tfm, rc = -EACCES);
1601                 }
1602
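                /* arcfour is a stream cipher, so decryption is the same
                 * operation as encryption and the encrypt helper can be
                 * reused to recover the plain text */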
1603                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1604                                           1, &cipher_in, &plain_out, 0);
1605 arc4_out_tfm:
1606                 ll_crypto_free_blkcipher(arc4_tfm);
1607 arc4_out_key:
1608                 rawobj_free(&arc4_keye);
1609 arc4_out:
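                /* cksum pointed into the token buffer, not to memory we
                 * allocated; reset it so rawobj_free() at out_free cannot
                 * free token data */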
1610                 cksum = RAWOBJ_EMPTY;
1611         } else {
1612                 rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1613                                           1, &cipher_in, &plain_out, 0);
1614         }
1615
1616         if (rc != 0) {
1617                 CERROR("error decrypt\n");
1618                 goto out_free;
1619         }
1620         LASSERT(plain_out.len == bodysize);
1621
1622         /* expected clear text layout:
1623          * -----------------------------------------
1624          * | confounder | clear msgs | krb5 header |
1625          * -----------------------------------------
1626          */
1627
1628         /* verify that the krb5 header carried in the token was not modified */
1629         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1630                    sizeof(*khdr))) {
1631                 CERROR("decrypted krb5 header mismatch\n");
1632                 goto out_free;
1633         }
1634
1635         /* verify checksum; the clear text is hashed in this layout:
1636          * ------------------------------------------------------
1637          * | confounder | gss header | clear msgs | krb5 header |
1638          * ------------------------------------------------------
1639          */
1640         hash_objs[0].len = ke->ke_conf_size;
1641         hash_objs[0].data = plain_out.data;
1642         hash_objs[1].len = gsshdr->len;
1643         hash_objs[1].data = gsshdr->data;
1644         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1645         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1646         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1647                                khdr, 3, hash_objs, 0, NULL, &cksum))
1648                 goto out_free;
1649
1650         LASSERT(cksum.len >= ke->ke_hash_size);
1651         if (memcmp((char *)(khdr + 1) + bodysize,
1652                    cksum.data + cksum.len - ke->ke_hash_size,
1653                    ke->ke_hash_size)) {
1654                 CERROR("checksum mismatch\n");
1655                 goto out_free;
1656         }
1657
1658         msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
1659         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1660
1661         major = GSS_S_COMPLETE;
1662 out_free:
1663         OBD_FREE_LARGE(tmpbuf, bodysize);
1664         rawobj_free(&cksum);
1665         return major;
1666 }
1667
1668 static
1669 __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
1670                                struct ptlrpc_bulk_desc *desc,
1671                                rawobj_t *token, int adj_nob)
1672 {
1673         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1674         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1675         struct krb5_header  *khdr;
1676         int                  blocksize;
1677         rawobj_t             cksum = RAWOBJ_EMPTY;
1678         rawobj_t             cipher, plain;
1679         rawobj_t             data_desc[1];
1680         int                  rc;
1681         __u32                major;
1682
1683         LASSERT(ke);
1684
1685         if (token->len < sizeof(*khdr)) {
1686                 CERROR("short signature: %u\n", token->len);
1687                 return GSS_S_DEFECTIVE_TOKEN;
1688         }
1689
1690         khdr = (struct krb5_header *) token->data;
1691
1692         major = verify_krb5_header(kctx, khdr, 1);
1693         if (major != GSS_S_COMPLETE) {
1694                 CERROR("bad krb5 header\n");
1695                 return major;
1696         }
1697
1698         /* block size: arcfour (a stream cipher) is not supported for bulk */
1699         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1700                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1701                 blocksize = 1;
1702                 LBUG();
1703         } else {
1704                 LASSERT(kctx->kc_keye.kb_tfm);
1705                 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1706         }
1707         LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1708
1709         /*
1710          * token format is expected as:
1711          * -----------------------------------------------
1712          * | krb5 header | head/tail cipher text | cksum |
1713          * -----------------------------------------------
1714          */
1715         if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
1716                          ke->ke_hash_size) {
1717                 CERROR("short token size: %u\n", token->len);
1718                 return GSS_S_DEFECTIVE_TOKEN;
1719         }
1720
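        /*
         * cipher and plain alias the same bytes inside the token: the
         * confounder block and the embedded krb5 header are decrypted in
         * place, while the bulk pages are decrypted via the descriptor.
         */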
1721         cipher.data = (__u8 *) (khdr + 1);
1722         cipher.len = blocksize + sizeof(*khdr);
1723         plain.data = cipher.data;
1724         plain.len = cipher.len;
1725
1726         rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1727                                desc, &cipher, &plain, adj_nob);
1728         if (rc)
1729                 return GSS_S_DEFECTIVE_TOKEN;
1730
1731         /*
1732          * verify checksum; the clear text is hashed in this layout:
1733          * ------------------------------------------
1734          * | confounder | clear pages | krb5 header |
1735          * ------------------------------------------
1736          */
1737         data_desc[0].data = plain.data;
1738         data_desc[0].len = blocksize;
1739
1740         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1741                                khdr, 1, data_desc,
1742                                desc->bd_iov_count, desc->bd_iov,
1743                                &cksum))
1744                 return GSS_S_FAILURE;
1745         LASSERT(cksum.len >= ke->ke_hash_size);
1746
1747         if (memcmp(plain.data + blocksize + sizeof(*khdr),
1748                    cksum.data + cksum.len - ke->ke_hash_size,
1749                    ke->ke_hash_size)) {
1750                 CERROR("checksum mismatch\n");
1751                 rawobj_free(&cksum);
1752                 return GSS_S_BAD_SIG;
1753         }
1754
1755         rawobj_free(&cksum);
1756         return GSS_S_COMPLETE;
1757 }
1758
1759 int gss_display_kerberos(struct gss_ctx        *ctx,
1760                          char                  *buf,
1761                          int                    bufsize)
1762 {
1763         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1764         int                 written;
1765
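        /* note: snprintf() returns the length that would have been written,
         * which may exceed bufsize if the name was truncated */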
1766         written = snprintf(buf, bufsize, "krb5 (%s)",
1767                            enctype2str(kctx->kc_enctype));
1768         return written;
1769 }
1770
1771 static struct gss_api_ops gss_kerberos_ops = {
1772         .gss_import_sec_context     = gss_import_sec_context_kerberos,
1773         .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
1774         .gss_inquire_context        = gss_inquire_context_kerberos,
1775         .gss_get_mic                = gss_get_mic_kerberos,
1776         .gss_verify_mic             = gss_verify_mic_kerberos,
1777         .gss_wrap                   = gss_wrap_kerberos,
1778         .gss_unwrap                 = gss_unwrap_kerberos,
1779         .gss_prep_bulk              = gss_prep_bulk_kerberos,
1780         .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
1781         .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
1782         .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
1783         .gss_display                = gss_display_kerberos,
1784 };
1785
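/*
 * the four kerberos sub-flavors map onto the generic sptlrpc service
 * levels: krb5n provides no per-message protection, krb5a authenticates
 * the rpc header only, krb5i adds full integrity protection, and krb5p
 * adds privacy (encryption) as well.
 */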
1786 static struct subflavor_desc gss_kerberos_sfs[] = {
1787         {
1788                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
1789                 .sf_qop         = 0,
1790                 .sf_service     = SPTLRPC_SVC_NULL,
1791                 .sf_name        = "krb5n"
1792         },
1793         {
1794                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
1795                 .sf_qop         = 0,
1796                 .sf_service     = SPTLRPC_SVC_AUTH,
1797                 .sf_name        = "krb5a"
1798         },
1799         {
1800                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
1801                 .sf_qop         = 0,
1802                 .sf_service     = SPTLRPC_SVC_INTG,
1803                 .sf_name        = "krb5i"
1804         },
1805         {
1806                 .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
1807                 .sf_qop         = 0,
1808                 .sf_service     = SPTLRPC_SVC_PRIV,
1809                 .sf_name        = "krb5p"
1810         },
1811 };
1812
1813 /*
1814  * currently we leave module owner NULL
1815  */
1816 static struct gss_api_mech gss_kerberos_mech = {
1817         .gm_owner       = NULL, /*THIS_MODULE, */
1818         .gm_name        = "krb5",
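        /* DER encoding of the kerberos v5 mechanism OID 1.2.840.113554.1.2.2 */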
1819         .gm_oid         = (rawobj_t)
1820                                 {9, "\052\206\110\206\367\022\001\002\002"},
1821         .gm_ops         = &gss_kerberos_ops,
1822         .gm_sf_num      = 4,
1823         .gm_sfs         = gss_kerberos_sfs,
1824 };
1825
1826 int __init init_kerberos_module(void)
1827 {
1828         int status;
1829
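        /* krb5_seq_lock is taken when a send sequence number is stamped
         * into a token header, serializing updates across contexts */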
1830         cfs_spin_lock_init(&krb5_seq_lock);
1831
1832         status = lgss_mech_register(&gss_kerberos_mech);
1833         if (status)
1834                 CERROR("Failed to register kerberos gss mechanism!\n");
1835         return status;
1836 }
1837
1838 void __exit cleanup_kerberos_module(void)
1839 {
1840         lgss_mech_unregister(&gss_kerberos_mech);
1841 }