LU-2221 ptlrpc: kerberos support for kernel>=2.6.24
fs/lustre-release.git: lustre/ptlrpc/gss/gss_krb5_mech.c
/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 *  linux/net/sunrpc/gss_krb5_mech.c
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *  linux/net/sunrpc/gss_krb5_seal.c
 *  linux/net/sunrpc/gss_krb5_seqnum.c
 *  linux/net/sunrpc/gss_krb5_unseal.c
 *
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
 *  J. Bruce Fields <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#else
#include <liblustre.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"

static DEFINE_SPINLOCK(krb5_seq_lock);

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};

/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but for now we simply do CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
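/*
 * Worked example (editor's illustration, not in the original source):
 * under real aes128-cts a 20-byte plaintext yields exactly 20 bytes of
 * cipher text via ciphertext stealing, while the CBC-with-padding
 * fallback used here first pads it to 32 bytes (two full AES blocks),
 * so the output sizes differ. That difference is presumably why the
 * note above flags this for fixing.
 */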
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                "des-cbc-md5",
                "cbc(des)",
                "md5",
                0,
                16,
                8,
                0,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                "des3-hmac-sha1",
                "cbc(des3_ede)",
                "hmac(sha1)",
                0,
                20,
                8,
                1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                "arcfour-hmac-md5",
                "ecb(arc4)",
                "hmac(md5)",
                0,
                16,
                8,
                1,
        },
};

#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(struct krb5_enctype))
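
/*
 * Editor's note: enctypes[] is a sparse array indexed directly by the
 * ENCTYPE_* values, so MAX_ENCTYPES is the highest initialized index
 * plus one, and the gaps between designated initializers are zeroed by
 * the compiler. An uninitialized slot therefore has a NULL ke_dispname,
 * which is exactly what enctype2str() below checks for.
 */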

static const char * enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}

static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
        if (IS_ERR(kb->kb_tfm)) {
                CERROR("failed to alloc tfm: %s, mode %d\n",
                       alg_name, alg_mode);
                /* don't leave an ERR_PTR behind for keyblock_free() */
                kb->kb_tfm = NULL;
                return -1;
        }

        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data,
                                       kb->kb_key.len)) {
                CERROR("failed to set %s key, len %d\n",
                       alg_name, kb->kb_key.len);
                return -1;
        }

        return 0;
}

static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* the arc4 tfm is stateful; callers must allocate, use and free
         * it themselves */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* the hmac tfm is stateful; callers must allocate, use and free
         * it themselves */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}

static
void keyblock_free(struct krb5_keyblock *kb)
{
        rawobj_free(&kb->kb_key);
        if (kb->kb_tfm)
                ll_crypto_free_blkcipher(kb->kb_tfm);
}

static
int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
{
        return rawobj_dup(&new->kb_key, &kb->kb_key);
}

static
int get_bytes(char **ptr, const char *end, void *res, int len)
{
        char *p, *q;

        p = *ptr;
        q = p + len;
        if (q > end || q < p)
                return -1;
        memcpy(res, p, len);
        *ptr = q;
        return 0;
}

static
int get_rawobj(char **ptr, const char *end, rawobj_t *res)
{
        char   *p, *q;
        __u32   len;

        p = *ptr;
        if (get_bytes(&p, end, &len, sizeof(len)))
                return -1;

        q = p + len;
        if (q > end || q < p)
                return -1;

        OBD_ALLOC_LARGE(res->data, len);
        if (!res->data)
                return -1;

        res->len = len;
        memcpy(res->data, p, len);
        *ptr = q;
        return 0;
}
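
/*
 * Example of the wire layout get_rawobj() consumes (editor's
 * illustration, not in the original source): a 4-byte length in host
 * byte order followed by that many bytes of data, e.g. a 9-byte mech
 * OID would be encoded as
 *
 *      | len = 9 (u32) | 9 OID bytes |
 *
 * get_bytes() first pulls the length, then the payload is copied into
 * a freshly allocated rawobj_t and *ptr is advanced past it.
 */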

static
int get_keyblock(char **ptr, const char *end,
                 struct krb5_keyblock *kb, __u32 keysize)
{
        char *buf;

        OBD_ALLOC_LARGE(buf, keysize);
        if (buf == NULL)
                return -1;

        if (get_bytes(ptr, end, buf, keysize)) {
                OBD_FREE_LARGE(buf, keysize);
                return -1;
        }

        kb->kb_key.len = keysize;
        kb->kb_key.data = buf;
        return 0;
}

static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        keyblock_free(&kctx->kc_keye);
        keyblock_free(&kctx->kc_keyi);
        keyblock_free(&kctx->kc_keyc);
}

static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}

/* Flag bits in a version 2 context */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}

/*
 * The whole purpose here is to keep the user-level gss context parsing
 * from nfs-utils as unchanged as possible. That code is not quite mature
 * yet, and many things are still unclear, e.g. heimdal support.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char            *p = (char *) inbuf->data;
        char            *end = (char *) (inbuf->data + inbuf->len);
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only versions 0, 1 (rfc1964) and 2 (rfc4121) are supported */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}

static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}

static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
        sg_set_buf(sg, ptr, len);
}

static
__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
                   int decrypt,
                   void *iv,
                   void *in,
                   void *out,
                   int length)
{
        struct blkcipher_desc desc;
        struct scatterlist    sg;
        __u8 local_iv[16] = {0};
        __u32 ret = -EINVAL;

        LASSERT(tfm);
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
                CERROR("output length %d is not a multiple of blocksize %d\n",
                       length, ll_crypto_blkcipher_blocksize(tfm));
                goto out;
        }

        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n",
                       ll_crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));

        memcpy(out, in, length);
        buf_to_sg(&sg, out, length);

        if (decrypt)
                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
        else
                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);

out:
        return ret;
}
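
/*
 * Editor's usage note (mirrors krb5_digest_norm() below, not in the
 * original source): a typical in-tree call encrypts a freshly computed
 * digest with the checksum key, e.g.
 *
 *      krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
 *                   cksum->data, cksum->len);
 *
 * the routine copies in to out first and then runs the cipher over a
 * single scatterlist covering out, so the result lands in place.
 */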

#ifdef HAVE_ASYNC_BLOCK_CIPHER

static inline
int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int                i;

        ll_crypto_hash_setkey(tfm, key->data, key->len);
        desc.tfm  = tfm;
        desc.flags = 0;

        ll_crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                ll_crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        return ll_crypto_hash_final(&desc, cksum->data);
}

#else /* ! HAVE_ASYNC_BLOCK_CIPHER */

static inline
int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct scatterlist sg[1];
        __u32              keylen = key->len, i;

        crypto_hmac_init(tfm, key->data, &keylen);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hmac_update(tfm, sg, 1);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                crypto_hmac_update(tfm, sg, 1);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_hmac_update(tfm, sg, 1);
        }

        crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
        return 0;
}

#endif /* HAVE_ASYNC_BLOCK_CIPHER */

static inline
int krb5_digest_norm(struct ll_crypto_hash *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int                i;

        LASSERT(kb->kb_tfm);
        desc.tfm  = tfm;
        desc.flags = 0;

        ll_crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                ll_crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        ll_crypto_hash_final(&desc, cksum->data);

        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
}

/*
 * compute a (keyed or keyless) checksum over the plain text with the
 * krb5 wire token header appended (see the usage sketch after this
 * function).
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct krb5_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct ll_crypto_hash *tfm;
        __u32                  code = GSS_S_FAILURE;
        int                    rc;

        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = ll_crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        if (ke->ke_hash_hmac)
                rc = krb5_digest_hmac(tfm, &kb->kb_key,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = krb5_digest_norm(tfm, kb,
                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        ll_crypto_free_hash(tfm);
        return code;
}
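
/*
 * Editor's usage sketch (mirrors gss_get_mic_kerberos() below, not part
 * of the original source): krb5_make_checksum() allocates cksum->data
 * at the tfm's full digest size; callers then keep only the trailing
 * ke_hash_size bytes of it:
 *
 *      rawobj_t cksum = RAWOBJ_EMPTY;
 *
 *      if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
 *                             msgcnt, msgs, iovcnt, iovs, &cksum))
 *              return GSS_S_FAILURE;
 *      memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
 *             ke->ke_hash_size);
 *      rawobj_free(&cksum);
 */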

static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}
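
/*
 * Editor's note: assuming the RFC 4121 token layout, struct krb5_header
 * is the 16-byte wire header filled above:
 *
 *      | tok_id (2) | flags (1) | filler (1) | ec (2) | rrc (2) | seq (8) |
 *
 * MIC tokens carry 0xffff in ec/rrc, wrap tokens start them at 0; the
 * sequence number is a 64-bit big-endian counter guarded by
 * krb5_seq_lock.
 */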

static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}

static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

static
int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
{
        int padding;

        padding = (blocksize - (msg->len & (blocksize - 1))) &
                  (blocksize - 1);
        if (!padding)
                return 0;

        if (msg->len + padding > msg_buflen) {
                CERROR("bufsize %u too small: datalen %u, padding %u\n",
                       msg_buflen, msg->len, padding);
                return -EINVAL;
        }

        memset(msg->data + msg->len, padding, padding);
        msg->len += padding;
        return 0;
}
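
/*
 * Worked example (editor's illustration, not in the original source):
 * with blocksize 16 and msg->len == 20, padding == 12, so twelve bytes
 * of value 0x0c are appended and msg->len becomes 32. Note the pad
 * length itself is used as the fill byte, PKCS#7 style.
 */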

static
int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
                         int mode_ecb,
                         int inobj_cnt,
                         rawobj_t *inobjs,
                         rawobj_t *outobj,
                         int enc)
{
        struct blkcipher_desc desc;
        struct scatterlist    src, dst;
        __u8                  local_iv[16] = {0}, *buf;
        __u32                 datalen = 0;
        int                   i, rc;
        ENTRY;

        buf = outobj->data;
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        for (i = 0; i < inobj_cnt; i++) {
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
                buf_to_sg(&dst, buf, outobj->len - datalen);

                if (mode_ecb) {
                        if (enc)
                                rc = ll_crypto_blkcipher_encrypt(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = ll_crypto_blkcipher_decrypt(
                                        &desc, &dst, &src, src.length);
                } else {
                        if (enc)
                                rc = ll_crypto_blkcipher_encrypt_iv(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = ll_crypto_blkcipher_decrypt_iv(
                                        &desc, &dst, &src, src.length);
                }

                if (rc) {
                        CERROR("encrypt error %d\n", rc);
                        RETURN(rc);
                }

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        outobj->len = datalen;
        RETURN(0);
}

/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        buf_to_sg(&src, confounder, blocksize);
        buf_to_sg(&dst, cipher->data, blocksize);

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("failed to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_set_page(&src, desc->bd_iov[i].kiov_page,
                            (desc->bd_iov[i].kiov_len + blocksize - 1) &
                            (~(blocksize - 1)),
                            desc->bd_iov[i].kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
                            src.offset);

                desc->bd_enc_iov[i].kiov_offset = dst.offset;
                desc->bd_enc_iov[i].kiov_len = dst.length;

                rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        buf_to_sg(&src, khdr, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("failed to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}

/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text supposed to be.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_enc_iov[]->kiov_len. this means we DO NOT support the situation
 *   where the server sends odd-sized data in a page which is not the
 *   last one.
 * - for server write: we know exactly the data size expected for each
 *   page, thus kiov_len is already accurate and we should not adjust it
 *   at all. bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len),
 *   which should have been done by prep_bulk().
 */
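
/*
 * Worked example (editor's illustration, not in the original source)
 * for the client-read case with adj_nob != 0: suppose two pages, each
 * with kiov_len preset to PAGE_SIZE (4096), but the server actually
 * sent 6000 bytes of cipher text. The loop below leaves page 0 at 4096,
 * trims bd_enc_iov[1].kiov_len to 6000 - 4096 = 1904, copies that into
 * bd_iov[1].kiov_len, and zeroes any further pages, so pt_nob ends up
 * matching what was really transferred.
 */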
static
int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        int                     ct_nob = 0, pt_nob = 0;
        int                     blocksize, i, rc;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(desc->bd_nob_transferred);

        blocksize = ll_crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        buf_to_sg(&src, cipher->data, blocksize);
        buf_to_sg(&dst, plain->data, blocksize);

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("failed to decrypt confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_iov[i].kiov_offset,
                               desc->bd_enc_iov[i].kiov_len, blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob_transferred)
                                desc->bd_enc_iov[i].kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob)
                                desc->bd_iov[i].kiov_len =
                                        desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(desc->bd_iov[i].kiov_len <=
                                desc->bd_enc_iov[i].kiov_len);
                }

                if (desc->bd_enc_iov[i].kiov_len == 0)
                        continue;

                sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
                            desc->bd_enc_iov[i].kiov_len,
                            desc->bd_enc_iov[i].kiov_offset);
                dst = src;
                if (desc->bd_iov[i].kiov_len % blocksize == 0)
                        sg_assign_page(&dst, desc->bd_iov[i].kiov_page);

                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("failed to decrypt page: %d\n", rc);
                        return rc;
                }

                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
                        memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               desc->bd_iov[i].kiov_len);
                }

                ct_nob += desc->bd_enc_iov[i].kiov_len;
                pt_nob += desc->bd_iov[i].kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, zero out the remaining unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_iov[i++].kiov_len = 0;

        /* decrypt tail (krb5 header) */
        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("failed to decrypt tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}

static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * -------------------------------------------------------------
         * | krb5 header | cipher text | checksum (ke_hash_size bytes) |
         * -------------------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be
         * associated with a tfm; currently that is only the case for
         * arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text is generated directly in place */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct ll_crypto_cipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          3, data_desc, &cipher, 1);
arc4_out_tfm:
                ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid a compile warning */
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          3, data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}

static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        int                  blocksize, i;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_iov[i].kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.
                 */
                if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               desc->bd_iov[i].kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
                                                blocksize - 1) &
                                               (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}
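
/*
 * Editor's illustration (not in the original source): with an AES tfm
 * (blocksize 16) a clear page fragment of kiov_len 1000 gets its
 * bd_enc_iov[i].kiov_len rounded up to 1008, so every encrypted
 * fragment is block-aligned as krb5_encrypt_bulk() and
 * krb5_decrypt_bulk() require.
 */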

static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be
         * associated with a tfm; currently that is only the case for
         * arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) is a multiple of
         * blocksize. the bulk token size is then exactly
         * (sizeof(krb5_header) + blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
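
        /*
         * Editor's illustration (not in the original source): for the
         * aes enctypes (blocksize 16, 16-byte krb5 header) the non-page
         * part of the bulk token is 16 (header) + 16 (encrypted
         * confounder) + 16 (encrypted header copy) + checksum of up to
         * 16 bytes, which is what the LASSERTs above require of
         * token->len.
         */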

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
1491
1492 static
1493 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1494                           rawobj_t        *gsshdr,
1495                           rawobj_t        *token,
1496                           rawobj_t        *msg)
1497 {
1498         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1499         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1500         struct krb5_header  *khdr;
1501         unsigned char       *tmpbuf;
1502         int                  blocksize, bodysize;
1503         rawobj_t             cksum = RAWOBJ_EMPTY;
1504         rawobj_t             cipher_in, plain_out;
1505         rawobj_t             hash_objs[3];
1506         int                  rc = 0;
1507         __u32                major;
1508
1509         LASSERT(ke);
1510
1511         if (token->len < sizeof(*khdr)) {
1512                 CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
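        /* arcfour-hmac is a stream cipher with no alignment requirement,
         * hence the nominal block size of 1; the other enctypes are block
         * ciphers and take the block size from the tfm */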

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n",
                       msg->len, bodysize - ke->ke_conf_size -
                       (int) sizeof(*khdr));
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *) (khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

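        /*
         * for arcfour-hmac the per-message RC4 key is derived from the
         * transmitted checksum: an HMAC keyed with kc_keyi over the
         * checksum yields the encryption key (cf. RFC 4757), which is
         * why krb5_make_checksum() is reused below to obtain arc4_keye
         */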
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct ll_crypto_cipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (ll_crypto_blkcipher_setkey(arc4_tfm,
                                         arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
                ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
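                /* cksum pointed into the token buffer above, not into an
                 * allocation of ours; reset it so the rawobj_free() under
                 * out_free does not free memory we do not own */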
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          1, &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("decryption failed\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify krb5 header in token is not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
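        /* note the gss header is folded into the checksum input, binding
         * this wrap token to the surrounding RPC message */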
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}

static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short token: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
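                /* RC4 is not supported for bulk transfer, so reaching
                 * this branch is a bug */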
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;
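        /* the head/tail cipher text is decrypted in place within the
         * token (plain and cipher alias the same buffer); the data pages
         * are handled by krb5_decrypt_bulk() through the descriptor */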

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

int gss_display_kerberos(struct gss_ctx        *ctx,
                         char                  *buf,
                         int                    bufsize)
{
        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
        int                 written;

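        /* snprintf() returns the length that would have been written, so
         * the value returned here can exceed bufsize on truncation */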
        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}

static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};

static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};

/*
 * currently we leave module owner NULL
 */
static struct gss_api_mech gss_kerberos_mech = {
        .gm_owner       = NULL, /*THIS_MODULE, */
        .gm_name        = "krb5",
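        /* DER encoding of OID 1.2.840.113554.1.2.2, the Kerberos V5
         * GSS-API mechanism (RFC 1964) */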
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};

int __init init_kerberos_module(void)
{
        int status;

        spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}

void __exit cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}