Whamcloud - gitweb
LU-1346 libcfs: replace libcfs wrappers with kernel API
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, Whamcloud, Inc.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #ifdef __KERNEL__
53 #include <linux/init.h>
54 #include <linux/module.h>
55 #include <linux/slab.h>
56 #include <linux/crypto.h>
57 #include <linux/mutex.h>
58 #else
59 #include <liblustre.h>
60 #endif
61
62 #include <obd.h>
63 #include <obd_class.h>
64 #include <obd_support.h>
65 #include <lustre/lustre_idl.h>
66 #include <lustre_net.h>
67 #include <lustre_import.h>
68 #include <lustre_sec.h>
69
70 #include "gss_err.h"
71 #include "gss_internal.h"
72 #include "gss_api.h"
73 #include "gss_asn1.h"
74 #include "gss_krb5.h"
75
/* serializes kc_seq_send increments in fill_krb5_header(); must be
 * statically initialized -- a bare "static spinlock_t" is an invalid
 * (uninitialized) lock when spinlock debugging is enabled */
static DEFINE_SPINLOCK(krb5_seq_lock);
77
/* per-enctype parameters; the *_name fields are linux crypto API
 * algorithm names passed to the tfm allocators */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable name */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
87
/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but currently we simply use CBC with padding, because linux
 * doesn't support CTS yet. this needs to be fixed in the future.
 */
93 static struct krb5_enctype enctypes[] = {
94         [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
95                 "des-cbc-md5",
96                 "cbc(des)",
97                 "md5",
98                 0,
99                 16,
100                 8,
101                 0,
102         },
103         [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
104                 "des3-hmac-sha1",
105                 "cbc(des3_ede)",
106                 "hmac(sha1)",
107                 0,
108                 20,
109                 8,
110                 1,
111         },
112         [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
113                 "aes128-cts-hmac-sha1-96",
114                 "cbc(aes)",
115                 "hmac(sha1)",
116                 0,
117                 12,
118                 16,
119                 1,
120         },
121         [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
122                 "aes256-cts-hmac-sha1-96",
123                 "cbc(aes)",
124                 "hmac(sha1)",
125                 0,
126                 12,
127                 16,
128                 1,
129         },
130         [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
131                 "arcfour-hmac-md5",
132                 "ecb(arc4)",
133                 "hmac(md5)",
134                 0,
135                 16,
136                 8,
137                 1,
138         },
139 };
140
141 #define MAX_ENCTYPES    sizeof(enctypes)/sizeof(struct krb5_enctype)
142
143 static const char * enctype2str(__u32 enctype)
144 {
145         if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
146                 return enctypes[enctype].ke_dispname;
147
148         return "unknown";
149 }
150
151 static
152 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
153 {
154         kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
155         if (IS_ERR(kb->kb_tfm)) {
156                 CERROR("failed to alloc tfm: %s, mode %d\n",
157                        alg_name, alg_mode);
158                 return -1;
159         }
160
161         if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
162                 CERROR("failed to set %s key, len %d\n",
163                        alg_name, kb->kb_key.len);
164                 return -1;
165         }
166
167         return 0;
168 }
169
/**
 * Set up cipher tfms for the context's keys according to kc_enctype.
 *
 * arc4 and hmac tfms are deliberately NOT cached here: they carry
 * internal state, so each user allocates/uses/frees its own.
 * \retval 0 on success, -1 on unsupported enctype or tfm setup failure
 * (partially initialized keyblocks are left for the caller to free via
 * delete_context_kerberos()).
 */
static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* tfm arc4 is stateful, user should alloc-use-free by his own */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* tfm hmac is stateful, user should alloc-use-free by his own */
        /* for non-hmac enctypes the keyi/keyc checksums are produced by
         * encrypting a plain digest, hence the enc tfm here too */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}
198
199 static
200 void keyblock_free(struct krb5_keyblock *kb)
201 {
202         rawobj_free(&kb->kb_key);
203         if (kb->kb_tfm)
204                 ll_crypto_free_blkcipher(kb->kb_tfm);
205 }
206
207 static
208 int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
209 {
210         return rawobj_dup(&new->kb_key, &kb->kb_key);
211 }
212
/* consume exactly @len bytes from the cursor *ptr into @res, advancing
 * the cursor; fail (-1) without touching anything if the read would
 * run past @end or the pointer arithmetic would wrap */
static
int get_bytes(char **ptr, const char *end, void *res, int len)
{
        char *start = *ptr;
        char *stop = start + len;

        if (stop > end || stop < start)
                return -1;

        memcpy(res, start, len);
        *ptr = stop;
        return 0;
}
225
226 static
227 int get_rawobj(char **ptr, const char *end, rawobj_t *res)
228 {
229         char   *p, *q;
230         __u32   len;
231
232         p = *ptr;
233         if (get_bytes(&p, end, &len, sizeof(len)))
234                 return -1;
235
236         q = p + len;
237         if (q > end || q < p)
238                 return -1;
239
240         OBD_ALLOC_LARGE(res->data, len);
241         if (!res->data)
242                 return -1;
243
244         res->len = len;
245         memcpy(res->data, p, len);
246         *ptr = q;
247         return 0;
248 }
249
250 static
251 int get_keyblock(char **ptr, const char *end,
252                  struct krb5_keyblock *kb, __u32 keysize)
253 {
254         char *buf;
255
256         OBD_ALLOC_LARGE(buf, keysize);
257         if (buf == NULL)
258                 return -1;
259
260         if (get_bytes(ptr, end, buf, keysize)) {
261                 OBD_FREE_LARGE(buf, keysize);
262                 return -1;
263         }
264
265         kb->kb_key.len = keysize;
266         kb->kb_key.data = buf;
267         return 0;
268 }
269
270 static
271 void delete_context_kerberos(struct krb5_ctx *kctx)
272 {
273         rawobj_free(&kctx->kc_mech_used);
274
275         keyblock_free(&kctx->kc_keye);
276         keyblock_free(&kctx->kc_keyi);
277         keyblock_free(&kctx->kc_keyc);
278 }
279
280 static
281 __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
282 {
283         unsigned int    tmp_uint, keysize;
284
285         /* seed_init flag */
286         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
287                 goto out_err;
288         kctx->kc_seed_init = (tmp_uint != 0);
289
290         /* seed */
291         if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
292                 goto out_err;
293
294         /* sign/seal algorithm, not really used now */
295         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
296             get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
297                 goto out_err;
298
299         /* end time */
300         if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
301                 goto out_err;
302
303         /* seq send */
304         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
305                 goto out_err;
306         kctx->kc_seq_send = tmp_uint;
307
308         /* mech oid */
309         if (get_rawobj(&p, end, &kctx->kc_mech_used))
310                 goto out_err;
311
312         /* old style enc/seq keys in format:
313          *   - enctype (u32)
314          *   - keysize (u32)
315          *   - keydata
316          * we decompose them to fit into the new context
317          */
318
319         /* enc key */
320         if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
321                 goto out_err;
322
323         if (get_bytes(&p, end, &keysize, sizeof(keysize)))
324                 goto out_err;
325
326         if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
327                 goto out_err;
328
329         /* seq key */
330         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
331             tmp_uint != kctx->kc_enctype)
332                 goto out_err;
333
334         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
335             tmp_uint != keysize)
336                 goto out_err;
337
338         if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
339                 goto out_err;
340
341         /* old style fallback */
342         if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
343                 goto out_err;
344
345         if (p != end)
346                 goto out_err;
347
348         CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
349         return 0;
350 out_err:
351         return GSS_S_FAILURE;
352 }
353
354 /* Flags for version 2 context flags */
355 #define KRB5_CTX_FLAG_INITIATOR         0x00000001
356 #define KRB5_CTX_FLAG_CFX               0x00000002
357 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
358
359 static
360 __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
361 {
362         unsigned int    tmp_uint, keysize;
363
364         /* end time */
365         if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
366                 goto out_err;
367
368         /* flags */
369         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
370                 goto out_err;
371
372         if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
373                 kctx->kc_initiate = 1;
374         if (tmp_uint & KRB5_CTX_FLAG_CFX)
375                 kctx->kc_cfx = 1;
376         if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
377                 kctx->kc_have_acceptor_subkey = 1;
378
379         /* seq send */
380         if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
381                 goto out_err;
382
383         /* enctype */
384         if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
385                 goto out_err;
386
387         /* size of each key */
388         if (get_bytes(&p, end, &keysize, sizeof(keysize)))
389                 goto out_err;
390
391         /* number of keys - should always be 3 */
392         if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
393                 goto out_err;
394
395         if (tmp_uint != 3) {
396                 CERROR("Invalid number of keys: %u\n", tmp_uint);
397                 goto out_err;
398         }
399
400         /* ke */
401         if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
402                 goto out_err;
403         /* ki */
404         if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
405                 goto out_err;
406         /* ki */
407         if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
408                 goto out_err;
409
410         CDEBUG(D_SEC, "succesfully imported v2 context\n");
411         return 0;
412 out_err:
413         return GSS_S_FAILURE;
414 }
415
/*
 * The whole purpose here is trying to keep user level gss context parsing
 * from nfs-utils unchanged as possible as we can, they are not quite mature
 * yet, and many stuff still not clear, like heimdal etc.
 */
/**
 * Import a serialized krb5 context: read the version word, dispatch to
 * the rfc1964 (v0/v1) or rfc4121 (v2) parser, then build the cipher
 * tfms.  On any failure everything allocated so far is torn down.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char            *p = (char *) inbuf->data;
        char            *end = (char *) (inbuf->data + inbuf->len);
        /* NOTE(review): rc is unsigned yet holds GSS_S_* codes; it is
         * only ever compared against 0, so this works, but a signed or
         * __u32 type would be clearer */
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* versions 0 and 1 are rfc1964 style, version 2 is rfc4121 */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* for old versions the version word doubles as the
                 * initiator flag */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
465
466 static
467 __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
468                                         struct gss_ctx *gctx_new)
469 {
470         struct krb5_ctx *kctx = gctx->internal_ctx_id;
471         struct krb5_ctx *knew;
472
473         OBD_ALLOC_PTR(knew);
474         if (!knew)
475                 return GSS_S_FAILURE;
476
477         knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
478         knew->kc_cfx = kctx->kc_cfx;
479         knew->kc_seed_init = kctx->kc_seed_init;
480         knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
481         knew->kc_endtime = kctx->kc_endtime;
482
483         memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
484         knew->kc_seq_send = kctx->kc_seq_recv;
485         knew->kc_seq_recv = kctx->kc_seq_send;
486         knew->kc_enctype = kctx->kc_enctype;
487
488         if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
489                 goto out_err;
490
491         if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
492                 goto out_err;
493         if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
494                 goto out_err;
495         if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
496                 goto out_err;
497         if (krb5_init_keys(knew))
498                 goto out_err;
499
500         gctx_new->internal_ctx_id = knew;
501         CDEBUG(D_SEC, "succesfully copied reverse context\n");
502         return GSS_S_COMPLETE;
503
504 out_err:
505         delete_context_kerberos(knew);
506         OBD_FREE_PTR(knew);
507         return GSS_S_FAILURE;
508 }
509
/* report the context expiry time.  NOTE(review): kc_endtime is first
 * truncated to __u32 before widening to unsigned long -- confirm this
 * is intentional (it discards any high bits of a 64-bit endtime) */
static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}
519
/* destroy a krb5 context: release its keys/mech OID, then the
 * context structure itself */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *ctx = internal_ctx;

        delete_context_kerberos(ctx);
        OBD_FREE_PTR(ctx);
}
528
/* describe a kernel virtual buffer with a single scatterlist entry
 * (old-style sg API: fields assigned directly rather than via
 * sg_set_buf()).  NOTE(review): a single entry with page+offset can
 * only be right if ptr..ptr+len stays within one page -- verify the
 * callers that pass whole-message lengths */
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
        sg->page = virt_to_page(ptr);
        sg->offset = offset_in_page(ptr);
        sg->length = len;
}
536
/*
 * CBC encrypt (decrypt != 0: decrypt) a single contiguous buffer:
 * @in is copied to @out first, then @out is transformed in place.
 * @iv, if given, seeds a local 16-byte IV (zero IV otherwise).
 * @length must be a multiple of the cipher block size.
 *
 * NOTE(review): ret is __u32 but initialized from -EINVAL, so the
 * error shows up as a large positive value; in this file callers only
 * test for nonzero, which still works.
 */
static
__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
                   int decrypt,
                   void * iv,
                   void * in,
                   void * out,
                   int length)
{
        struct blkcipher_desc desc;
        struct scatterlist    sg;
        __u8 local_iv[16] = {0};
        __u32 ret = -EINVAL;

        LASSERT(tfm);
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags= 0;

        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
                CERROR("output length %d mismatch blocksize %d\n",
                       length, ll_crypto_blkcipher_blocksize(tfm));
                goto out;
        }

        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));

        /* copy then transform in place: the same sg is used as both
         * source and destination */
        memcpy(out, in, length);
        buf_to_sg(&sg, out, length);

        if (decrypt)
                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
        else
                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);

out:
        return(ret);
}
580
581 #ifdef HAVE_ASYNC_BLOCK_CIPHER
582
583 static inline
584 int krb5_digest_hmac(struct ll_crypto_hash *tfm,
585                      rawobj_t *key,
586                      struct krb5_header *khdr,
587                      int msgcnt, rawobj_t *msgs,
588                      int iovcnt, lnet_kiov_t *iovs,
589                      rawobj_t *cksum)
590 {
591         struct hash_desc   desc;
592         struct scatterlist sg[1];
593         int                i;
594
595         ll_crypto_hash_setkey(tfm, key->data, key->len);
596         desc.tfm  = tfm;
597         desc.flags= 0;
598
599         ll_crypto_hash_init(&desc);
600
601         for (i = 0; i < msgcnt; i++) {
602                 if (msgs[i].len == 0)
603                         continue;
604                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
605                 ll_crypto_hash_update(&desc, sg, msgs[i].len);
606         }
607
608         for (i = 0; i < iovcnt; i++) {
609                 if (iovs[i].kiov_len == 0)
610                         continue;
611                 sg[0].page = iovs[i].kiov_page;
612                 sg[0].offset = iovs[i].kiov_offset;
613                 sg[0].length = iovs[i].kiov_len;
614                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
615         }
616
617         if (khdr) {
618                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
619                 ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
620         }
621
622         return ll_crypto_hash_final(&desc, cksum->data);
623 }
624
625 #else /* ! HAVE_ASYNC_BLOCK_CIPHER */
626
/* HMAC over msgs, iovs and the krb5 header using the legacy
 * crypto_hmac_* synchronous API (pre-async-blkcipher kernels); the
 * digest is written into cksum->data by crypto_hmac_final().
 * NOTE(review): the third argument to crypto_hmac_update() here is
 * the scatterlist entry count (always 1), unlike the byte length
 * passed in the newer-API variant -- confirm against the old API */
static inline
int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct scatterlist sg[1];
        __u32              keylen = key->len, i;

        crypto_hmac_init(tfm, key->data, &keylen);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hmac_update(tfm, sg, 1);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;
                sg[0].page = iovs[i].kiov_page;
                sg[0].offset = iovs[i].kiov_offset;
                sg[0].length = iovs[i].kiov_len;
                crypto_hmac_update(tfm, sg, 1);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_hmac_update(tfm, sg, 1);
        }

        crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
        return 0;
}
664
665 #endif /* HAVE_ASYNC_BLOCK_CIPHER */
666
/* keyless digest of msgs/iovs/khdr, then encrypted in place with the
 * keyblock's cipher tfm -- the "keyed checksum" scheme used by the
 * non-hmac enctypes (e.g. des-cbc-md5).
 * NOTE(review): init/update/final return codes are ignored; only the
 * final krb5_encrypt() result is propagated to the caller */
static inline
int krb5_digest_norm(struct ll_crypto_hash *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int                i;

        /* the keyblock must have its enc tfm set up (krb5_init_keys) */
        LASSERT(kb->kb_tfm);
        desc.tfm  = tfm;
        desc.flags= 0;

        ll_crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                ll_crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;
                sg[0].page = iovs[i].kiov_page;
                sg[0].offset = iovs[i].kiov_offset;
                sg[0].length = iovs[i].kiov_len;
                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        ll_crypto_hash_final(&desc, cksum->data);

        /* encrypt the digest in place with the checksum key */
        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
}
711
712 /*
713  * compute (keyed/keyless) checksum against the plain text which appended
714  * with krb5 wire token header.
715  */
716 static
717 __s32 krb5_make_checksum(__u32 enctype,
718                          struct krb5_keyblock *kb,
719                          struct krb5_header *khdr,
720                          int msgcnt, rawobj_t *msgs,
721                          int iovcnt, lnet_kiov_t *iovs,
722                          rawobj_t *cksum)
723 {
724         struct krb5_enctype   *ke = &enctypes[enctype];
725         struct ll_crypto_hash *tfm;
726         __u32                  code = GSS_S_FAILURE;
727         int                    rc;
728
729         if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
730                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
731                 return GSS_S_FAILURE;
732         }
733
734         cksum->len = ll_crypto_hash_digestsize(tfm);
735         OBD_ALLOC_LARGE(cksum->data, cksum->len);
736         if (!cksum->data) {
737                 cksum->len = 0;
738                 goto out_tfm;
739         }
740
741         if (ke->ke_hash_hmac)
742                 rc = krb5_digest_hmac(tfm, &kb->kb_key,
743                                       khdr, msgcnt, msgs, iovcnt, iovs, cksum);
744         else
745                 rc = krb5_digest_norm(tfm, kb,
746                                       khdr, msgcnt, msgs, iovcnt, iovs, cksum);
747
748         if (rc == 0)
749                 code = GSS_S_COMPLETE;
750 out_tfm:
751         ll_crypto_free_hash(tfm);
752         return code;
753 }
754
/* build a v2 wire token header: WRAP (privacy) or MIC token id, the
 * direction flag, EC/RRC fields (0 for wrap, 0xffff for mic), and the
 * next send sequence number -- kc_seq_send++ is serialized by the
 * global krb5_seq_lock */
static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}
780
/* sanity-check a received token header: token id, sender direction,
 * confidential flag (wrap only), filler byte, and the EC/RRC values
 * expected for wrap (0) vs mic (0xffff) tokens.  Note the acceptor
 * flag expectation is inverted relative to fill_krb5_header() since
 * this checks the peer's tokens */
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}
822
/* produce a MIC token into *token: a krb5 header followed by the
 * trailing ke_hash_size bytes of the keyed checksum computed over
 * msgs, iovs and the header itself; token->len is set to the actual
 * token size.  Caller must supply a buffer of at least
 * sizeof(krb5_header) + ke_hash_size bytes (enforced by LASSERT) */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum covers the payload plus the header just written */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        /* only the last ke_hash_size bytes of the digest go on the wire */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
855
/* verify a MIC token produced by the peer's gss_get_mic: validate the
 * header, recompute the keyed checksum over msgs/iovs/header, and
 * compare its trailing ke_hash_size bytes against the token.
 * NOTE(review): memcmp is not constant-time -- consider whether timing
 * matters for this comparison */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        /* compare the trailing ke_hash_size bytes of the digest */
        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
906
907 static
908 int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
909 {
910         int padding;
911
912         padding = (blocksize - (msg->len & (blocksize - 1))) &
913                   (blocksize - 1);
914         if (!padding)
915                 return 0;
916
917         if (msg->len + padding > msg_buflen) {
918                 CERROR("bufsize %u too small: datalen %u, padding %u\n",
919                         msg_buflen, msg->len, padding);
920                 return -EINVAL;
921         }
922
923         memset(msg->data + msg->len, padding, padding);
924         msg->len += padding;
925         return 0;
926 }
927
/*
 * Run a list of rawobjs through @tfm into one contiguous output buffer.
 * Despite the name, this handles both directions: @enc non-zero
 * encrypts, zero decrypts.
 *
 * \param tfm       block cipher handle; caller retains ownership
 * \param mode_ecb  non-zero: use the plain entry points (no IV, used for
 *                  the arcfour path); zero: use the _iv variants
 *                  (presumably the IV in desc.info chains across the
 *                  input chunks -- kernel blkcipher semantics, confirm)
 * \param inobj_cnt number of entries in @inobjs
 * \param inobjs    input buffers, processed in order
 * \param outobj    output; outobj->len is capacity on entry and is set
 *                  to the total bytes produced on success
 * \param enc       non-zero to encrypt, zero to decrypt
 *
 * \retval 0 on success, error code from the crypto layer otherwise
 */
static
int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
                         int mode_ecb,
                         int inobj_cnt,
                         rawobj_t *inobjs,
                         rawobj_t *outobj,
                         int enc)
{
        struct blkcipher_desc desc;
        struct scatterlist    src, dst;
        __u8                  local_iv[16] = {0}, *buf;
        __u32                 datalen = 0;
        int                   i, rc;
        ENTRY;

        buf = outobj->data;
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        for (i = 0; i < inobj_cnt; i++) {
                /* output buffer must have room for this input chunk */
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
                buf_to_sg(&dst, buf, outobj->len - datalen);

                if (mode_ecb) {
                        if (enc)
                                rc = ll_crypto_blkcipher_encrypt(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = ll_crypto_blkcipher_decrypt(
                                        &desc, &dst, &src, src.length);
                } else {
                        if (enc)
                                rc = ll_crypto_blkcipher_encrypt_iv(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = ll_crypto_blkcipher_decrypt_iv(
                                        &desc, &dst, &src, src.length);
                }

                if (rc) {
                        CERROR("encrypt error %d\n", rc);
                        RETURN(rc);
                }

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        /* report the number of bytes actually produced */
        outobj->len = datalen;
        RETURN(0);
}
982
983 /*
984  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
985  */
/*
 * Encrypt a bulk descriptor: the confounder, each clear page (in place
 * into the matching bd_enc_iov page), and finally the krb5 header, all
 * chained through one IV.
 *
 * \param khdr       krb5 header to be encrypted as the trailer
 * \param confounder blocksize bytes of random confounder
 * \param desc       bulk descriptor; bd_iov holds the clear pages and
 *                   bd_enc_iov receives the cipher pages
 * \param cipher     output for the non-page cipher text; must be exactly
 *                   blocksize + sizeof(*khdr) bytes:
 *                   | enc(confounder) | enc(krb5 header) |
 * \param adj_nob    non-zero: set desc->bd_nob to the rounded-up cipher
 *                   text size (see the comment above krb5_decrypt_bulk)
 *
 * \retval 0 on success, error code from the crypto layer otherwise
 */
static
int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        buf_to_sg(&src, confounder, blocksize);
        buf_to_sg(&dst, cipher->data, blocksize);

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                src.page = desc->bd_iov[i].kiov_page;
                src.offset = desc->bd_iov[i].kiov_offset;
                /* round each page's length up to a whole cipher block */
                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
                             (~(blocksize - 1));

                if (adj_nob)
                        nob += src.length;

                dst.page = desc->bd_enc_iov[i].kiov_page;
                dst.offset = src.offset;
                dst.length = src.length;

                /* record where the cipher text landed so the peer's
                 * decrypt side sees matching offsets/lengths */
                desc->bd_enc_iov[i].kiov_offset = dst.offset;
                desc->bd_enc_iov[i].kiov_len = dst.length;

                rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        buf_to_sg(&src, khdr, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
1061
1062 /*
1063  * desc->bd_nob_transferred is the size of cipher text received.
1064  * desc->bd_nob is the target size of plain text supposed to be.
1065  *
1066  * if adj_nob != 0, we adjust each page's kiov_len to the actual
1067  * plain text size.
1068  * - for client read: we don't know data size for each page, so
1069  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
1070  *   be smaller, so we need to adjust it according to bd_enc_iov[]->kiov_len.
1071  *   this means we DO NOT support the situation that server send an odd size
1072  *   data in a page which is not the last one.
1073  * - for server write: we knows exactly data size for each page being expected,
1074  *   thus kiov_len is accurate already, so we should not adjust it at all.
1075  *   and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which
1076  *   should have been done by prep_bulk().
1077  */
static
int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        int                     ct_nob = 0, pt_nob = 0;  /* cipher/plain bytes done */
        int                     blocksize, i, rc;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(desc->bd_nob_transferred);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        /* cipher holds | enc(confounder) | enc(krb5 header) | */
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* cipher text is always produced in whole blocks */
        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        buf_to_sg(&src, cipher->data, blocksize);
        buf_to_sg(&dst, plain->data, blocksize);

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error to decrypt confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                /* each cipher page fragment must be block aligned */
                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_iov[i].kiov_offset,
                               desc->bd_enc_iov[i].kiov_len, blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        /* clamp cipher length of the last page to what was
                         * actually transferred */
                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob_transferred)
                                desc->bd_enc_iov[i].kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        /* plain length follows cipher length, clamped to
                         * the expected total plain text size bd_nob */
                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
                                desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(desc->bd_iov[i].kiov_len <=
                                desc->bd_enc_iov[i].kiov_len);
                }

                if (desc->bd_enc_iov[i].kiov_len == 0)
                        continue;

                src.page = desc->bd_enc_iov[i].kiov_page;
                src.offset = desc->bd_enc_iov[i].kiov_offset;
                src.length = desc->bd_enc_iov[i].kiov_len;

                /* decrypt in place in the enc page by default; only when
                 * the plain length is a whole number of blocks can we
                 * decrypt straight into the clear page */
                dst = src;
                if (desc->bd_iov[i].kiov_len % blocksize == 0)
                        dst.page = desc->bd_iov[i].kiov_page;

                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to decrypt page: %d\n", rc);
                        return rc;
                }

                /* odd-size plain text: copy just the real bytes from the
                 * decrypted enc page into the clear page */
                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
                        memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               desc->bd_iov[i].kiov_len);
                }

                ct_nob += desc->bd_enc_iov[i].kiov_len;
                pt_nob += desc->bd_iov[i].kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_iov[i++].kiov_len = 0;

        /* decrypt tail (krb5 header), in place inside the cipher buffer */
        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error to decrypt tail: %d\n", rc);
                return rc;
        }

        /* the decrypted trailer must match the token's own krb5 header */
        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}
1211
/*
 * Wrap (encrypt + integrity-protect) a contiguous message.
 *
 * \param gctx       GSS context (kerberos part in internal_ctx_id)
 * \param gsshdr     gss header: covered by the checksum, not encrypted
 * \param msg        message to wrap; padded in place up to blocksize,
 *                   then encrypted in place into @token
 * \param msg_buflen capacity of msg->data (room for padding)
 * \param token      output; token->len is capacity on entry and set to
 *                   the final token size on success
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption (gss header is NOT encrypted):
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct ll_crypto_cipher *arc4_tfm;

                /* arcfour-hmac: derive the per-message rc4 key from a
                 * checksum of the message checksum, then encrypt with a
                 * temporary ecb(arc4) tfm */
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          3, data_desc, &cipher, 1);
arc4_out_tfm:
                ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while(0); /* just to avoid compile warning */
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          3, data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
1353
1354 static
1355 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1356                              struct ptlrpc_bulk_desc *desc)
1357 {
1358         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1359         int                  blocksize, i;
1360
1361         LASSERT(desc->bd_iov_count);
1362         LASSERT(desc->bd_enc_iov);
1363         LASSERT(kctx->kc_keye.kb_tfm);
1364
1365         blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1366
1367         for (i = 0; i < desc->bd_iov_count; i++) {
1368                 LASSERT(desc->bd_enc_iov[i].kiov_page);
1369                 /*
1370                  * offset should always start at page boundary of either
1371                  * client or server side.
1372                  */
1373                 if (desc->bd_iov[i].kiov_offset & blocksize) {
1374                         CERROR("odd offset %d in page %d\n",
1375                                desc->bd_iov[i].kiov_offset, i);
1376                         return GSS_S_FAILURE;
1377                 }
1378
1379                 desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
1380                 desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
1381                                                 blocksize - 1) & (~(blocksize - 1));
1382         }
1383
1384         return GSS_S_COMPLETE;
1385 }
1386
/*
 * Wrap a bulk descriptor: encrypt the data pages in place (into
 * bd_enc_iov) and build a token carrying the encrypted confounder,
 * encrypted krb5 header and the checksum over confounder + clear
 * pages + header.
 *
 * \param desc    bulk descriptor; bd_enc_iov must be prepared already
 *                (see gss_prep_bulk_kerberos)
 * \param token   output; token->len set to the final token size
 * \param adj_nob non-zero: let krb5_encrypt_bulk adjust desc->bd_nob to
 *                the rounded-up cipher text size
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum (the pages are passed separately
         * through the iov arguments of krb5_make_checksum):
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* cipher holds the encrypted confounder + encrypted krb5 header;
         * the pages themselves are encrypted into bd_enc_iov */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac is not supported for bulk */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
1497
1498 static
1499 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1500                           rawobj_t        *gsshdr,
1501                           rawobj_t        *token,
1502                           rawobj_t        *msg)
1503 {
1504         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1505         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1506         struct krb5_header  *khdr;
1507         unsigned char       *tmpbuf;
1508         int                  blocksize, bodysize;
1509         rawobj_t             cksum = RAWOBJ_EMPTY;
1510         rawobj_t             cipher_in, plain_out;
1511         rawobj_t             hash_objs[3];
1512         int                  rc = 0;
1513         __u32                major;
1514
1515         LASSERT(ke);
1516
1517         if (token->len < sizeof(*khdr)) {
1518                 CERROR("short signature: %u\n", token->len);
1519                 return GSS_S_DEFECTIVE_TOKEN;
1520         }
1521
1522         khdr = (struct krb5_header *) token->data;
1523
1524         major = verify_krb5_header(kctx, khdr, 1);
1525         if (major != GSS_S_COMPLETE) {
1526                 CERROR("bad krb5 header\n");
1527                 return major;
1528         }
1529
1530         /* block size */
1531         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1532                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1533                 blocksize = 1;
1534         } else {
1535                 LASSERT(kctx->kc_keye.kb_tfm);
1536                 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1537         }
1538
1539         /* expected token layout:
1540          * ----------------------------------------
1541          * | krb5 header | cipher text | checksum |
1542          * ----------------------------------------
1543          */
1544         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1545
1546         if (bodysize % blocksize) {
1547                 CERROR("odd bodysize %d\n", bodysize);
1548                 return GSS_S_DEFECTIVE_TOKEN;
1549         }
1550
1551         if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1552                 CERROR("incomplete token: bodysize %d\n", bodysize);
1553                 return GSS_S_DEFECTIVE_TOKEN;
1554         }
1555
1556         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1557                 CERROR("buffer too small: %u, require %d\n",
1558                        msg->len, bodysize - ke->ke_conf_size);
1559                 return GSS_S_FAILURE;
1560         }
1561
1562         /* decrypting */
1563         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1564         if (!tmpbuf)
1565                 return GSS_S_FAILURE;
1566
1567         major = GSS_S_FAILURE;
1568
1569         cipher_in.data = (__u8 *) (khdr + 1);
1570         cipher_in.len = bodysize;
1571         plain_out.data = tmpbuf;
1572         plain_out.len = bodysize;
1573
1574         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1575                 rawobj_t                 arc4_keye;
1576                 struct ll_crypto_cipher *arc4_tfm;
1577
1578                 cksum.data = token->data + token->len - ke->ke_hash_size;
1579                 cksum.len = ke->ke_hash_size;
1580
1581                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1582                                        NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1583                         CERROR("failed to obtain arc4 enc key\n");
1584                         GOTO(arc4_out, rc = -EACCES);
1585                 }
1586
1587                 arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1588                 if (IS_ERR(arc4_tfm)) {
1589                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1590                         GOTO(arc4_out_key, rc = -EACCES);
1591                 }
1592
1593                 if (ll_crypto_blkcipher_setkey(arc4_tfm,
1594                                          arc4_keye.data, arc4_keye.len)) {
1595                         CERROR("failed to set arc4 key, len %d\n",
1596                                arc4_keye.len);
1597                         GOTO(arc4_out_tfm, rc = -EACCES);
1598                 }
1599
1600                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1601                                           1, &cipher_in, &plain_out, 0);
1602 arc4_out_tfm:
1603                 ll_crypto_free_blkcipher(arc4_tfm);
1604 arc4_out_key:
1605                 rawobj_free(&arc4_keye);
1606 arc4_out:
1607                 cksum = RAWOBJ_EMPTY;
1608         } else {
1609                 rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1610                                           1, &cipher_in, &plain_out, 0);
1611         }
1612
1613         if (rc != 0) {
1614                 CERROR("error decrypt\n");
1615                 goto out_free;
1616         }
1617         LASSERT(plain_out.len == bodysize);
1618
1619         /* expected clear text layout:
1620          * -----------------------------------------
1621          * | confounder | clear msgs | krb5 header |
1622          * -----------------------------------------
1623          */
1624
1625         /* verify krb5 header in token is not modified */
1626         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1627                    sizeof(*khdr))) {
1628                 CERROR("decrypted krb5 header mismatch\n");
1629                 goto out_free;
1630         }
1631
1632         /* verify checksum, compose clear text as layout:
1633          * ------------------------------------------------------
1634          * | confounder | gss header | clear msgs | krb5 header |
1635          * ------------------------------------------------------
1636          */
1637         hash_objs[0].len = ke->ke_conf_size;
1638         hash_objs[0].data = plain_out.data;
1639         hash_objs[1].len = gsshdr->len;
1640         hash_objs[1].data = gsshdr->data;
1641         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1642         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1643         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1644                                khdr, 3, hash_objs, 0, NULL, &cksum))
1645                 goto out_free;
1646
1647         LASSERT(cksum.len >= ke->ke_hash_size);
1648         if (memcmp((char *)(khdr + 1) + bodysize,
1649                    cksum.data + cksum.len - ke->ke_hash_size,
1650                    ke->ke_hash_size)) {
1651                 CERROR("checksum mismatch\n");
1652                 goto out_free;
1653         }
1654
1655         msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
1656         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1657
1658         major = GSS_S_COMPLETE;
1659 out_free:
1660         OBD_FREE_LARGE(tmpbuf, bodysize);
1661         rawobj_free(&cksum);
1662         return major;
1663 }
1664
/*
 * Verify and decrypt a kerberos privacy-protected bulk transfer.
 *
 * \param gctx     GSS security context (wraps a struct krb5_ctx).
 * \param desc     bulk descriptor whose pages hold the encrypted payload;
 *                 pages are decrypted in place by krb5_decrypt_bulk().
 * \param token    security token produced by the peer's wrap_bulk:
 *                 | krb5 header | head/tail cipher text | cksum |
 * \param adj_nob  non-zero to allow krb5_decrypt_bulk() to adjust the
 *                 number of bytes actually transferred.
 *
 * \retval GSS_S_COMPLETE         token verified and payload decrypted.
 * \retval GSS_S_DEFECTIVE_TOKEN  token too short / malformed / undecryptable.
 * \retval GSS_S_BAD_SIG          checksum mismatch after decryption.
 * \retval GSS_S_FAILURE          internal failure computing the checksum.
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ke);

        /* must at least hold a krb5 header before we can look at it */
        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour is not supported for bulk data; this path should
                 * never be reached (LBUG traps it if it ever is) */
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* head/tail cipher text sits right after the header; it is
         * decrypted in place, so plain aliases cipher */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        /* decrypts both the head/tail cipher text and the bulk pages */
        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* compare the trailing ke_hash_size bytes of our checksum with the
         * checksum carried at the end of the decrypted head/tail region */
        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1755
1756 int gss_display_kerberos(struct gss_ctx        *ctx,
1757                          char                  *buf,
1758                          int                    bufsize)
1759 {
1760         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1761         int                 written;
1762
1763         written = snprintf(buf, bufsize, "krb5 (%s)",
1764                            enctype2str(kctx->kc_enctype));
1765         return written;
1766 }
1767
/* operation vector binding the kerberos 5 implementations of each
 * generic GSS operation; registered via gss_kerberos_mech below */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1782
/* sptlrpc subflavors offered by the krb5 mechanism, one per protection
 * level: null, auth (integrity of header only), intg (full integrity),
 * priv (privacy/encryption) */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1809
1810 /*
1811  * currently we leave module owner NULL
1812  */
1813 static struct gss_api_mech gss_kerberos_mech = {
1814         .gm_owner       = NULL, /*THIS_MODULE, */
1815         .gm_name        = "krb5",
1816         .gm_oid         = (rawobj_t)
1817                                 {9, "\052\206\110\206\367\022\001\002\002"},
1818         .gm_ops         = &gss_kerberos_ops,
1819         .gm_sf_num      = 4,
1820         .gm_sfs         = gss_kerberos_sfs,
1821 };
1822
1823 int __init init_kerberos_module(void)
1824 {
1825         int status;
1826
1827         spin_lock_init(&krb5_seq_lock);
1828
1829         status = lgss_mech_register(&gss_kerberos_mech);
1830         if (status)
1831                 CERROR("Failed to register kerberos gss mechanism!\n");
1832         return status;
1833 }
1834
/* Module exit: unregister the kerberos mechanism from the GSS framework. */
void __exit cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}