/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 *  linux/net/sunrpc/gss_krb5_mech.c
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *  linux/net/sunrpc/gss_krb5_seal.c
 *  linux/net/sunrpc/gss_krb5_seqnum.c
 *  linux/net/sunrpc/gss_krb5_unseal.c
 *
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
 *  J. Bruce Fields <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"
#include "gss_crypto.h"

static spinlock_t krb5_seq_lock;

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};

/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but we currently do plain CBC with padding, because linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "hmac(md5)",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};

#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(struct krb5_enctype))

static const char *enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}

static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* the arc4 tfm is stateful, callers must allocate, use and free
         * it themselves */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* the hmac tfm is stateful, callers must allocate, use and free
         * it themselves */
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}

static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        gss_keyblock_free(&kctx->kc_keye);
        gss_keyblock_free(&kctx->kc_keyi);
        gss_keyblock_free(&kctx->kc_keyc);
}

static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
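
/*
 * For reference, the rfc1964-style context blob parsed above looks like
 * this on the wire, after the leading version word consumed by the caller
 * (reconstructed from the parsing code, not from a spec):
 *
 *   u32    seed_init
 *   u8     seed[sizeof(kc_seed)]
 *   u32    sign algorithm (ignored)
 *   u32    seal algorithm (ignored)
 *          endtime (sizeof(kc_endtime) bytes)
 *   u32    seq_send
 *   rawobj mech oid (u32 length + data)
 *   u32 enctype, u32 keysize, keydata        <- enc key
 *   u32 enctype, u32 keysize, keydata        <- seq key, must match enc key
 */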

/* flag bits for version 2 contexts */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
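
/*
 * Likewise, the version 2 (rfc4121-style) context blob parsed above is,
 * as reconstructed from the code:
 *
 *          endtime (sizeof(kc_endtime) bytes)
 *   u32    flags (initiator/cfx/acceptor-subkey bits)
 *   u64    seq_send
 *   u32    enctype
 *   u32    keysize
 *   u32    key count (always 3)
 *   keydata x 3 (ke, ki, kc; keysize bytes each)
 */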

/*
 * The whole purpose here is to keep the user-level gss context parsing
 * from nfs-utils as unchanged as we can; it is not quite mature yet, and
 * many details are still unclear, like heimdal support etc.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only versions 0, 1 and 2 are supported for the moment */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}

static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long)((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}

/*
 * compute a (keyed/keyless) checksum over the plain text with the krb5
 * wire token header appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct crypto_hash    *tfm;
        rawobj_t               hdr;
        __u32                  code = GSS_S_FAILURE;
        int                    rc;

        tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0);
        if (IS_ERR(tfm)) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (ke->ke_hash_hmac)
                rc = gss_digest_hmac(tfm, &kb->kb_key,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = gss_digest_norm(tfm, kb,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        crypto_free_hash(tfm);
        return code;
}

static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}
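
/*
 * The 16-byte token header written above follows RFC 4121 section 4.2.6:
 *
 *   offset 0: tok_id (2 bytes, KG_TOK_WRAP_MSG or KG_TOK_MIC_MSG)
 *   offset 2: flags  (1 byte)
 *   offset 3: filler (1 byte, always 0xff)
 *   offset 4: ec     (2 bytes, 0 for wrap tokens, 0xffff for mic tokens)
 *   offset 6: rrc    (2 bytes, ditto)
 *   offset 8: seq    (8 bytes, big-endian send sequence number)
 */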

static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}

static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
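
/*
 * Resulting MIC token layout, e.g. for aes128-cts-hmac-sha1-96
 * (ke_hash_size = 12, illustrative numbers only):
 *
 *   | krb5 header (16 bytes) | checksum tail (12 bytes) |
 *
 * i.e. token->len ends up as 28 bytes; only the last ke_hash_size bytes
 * of the computed digest are kept, as the memcpy() above shows.
 */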

static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                             blocksize - 1) & (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
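
/*
 * Note on sizes in krb5_encrypt_bulk(): each clear page is encrypted
 * with its length rounded up to the cipher blocksize. As an illustrative
 * example, a 1000-byte kiov with a 16-byte AES block yields 1008 bytes
 * of cipher text; with adj_nob set, desc->bd_nob becomes the sum of
 * these rounded lengths.
 */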

/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
 *   this means we DO NOT support the situation where the server sends an
 *   odd-sized chunk of data in a page which is not the last one.
 * - for server write: we know exactly the data size expected for each
 *   page, thus kiov_len is already accurate and we should not adjust it
 *   at all. bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
 *   round_up(bd_iov[]->kiov_len), which should have been done by
 *   prep_bulk().
 */
static
int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     ct_nob = 0, pt_nob = 0;
        int                     blocksize, i, rc;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(desc->bd_nob_transferred);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to decrypt confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize != 0 ||
                    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
                               BD_GET_ENC_KIOV(desc, i).kiov_len,
                               blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob_transferred)
                                BD_GET_ENC_KIOV(desc, i).kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        BD_GET_KIOV(desc, i).kiov_len =
                                BD_GET_ENC_KIOV(desc, i).kiov_len;
                        if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob)
                                BD_GET_KIOV(desc, i).kiov_len =
                                        desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
                                BD_GET_ENC_KIOV(desc, i).kiov_len);
                }

                if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
                        continue;

                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            BD_GET_ENC_KIOV(desc, i).kiov_len,
                            BD_GET_ENC_KIOV(desc, i).kiov_offset);
                dst = src;
                if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
                        sg_assign_page(&dst,
                                       BD_GET_KIOV(desc, i).kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to decrypt page: %d\n", rc);
                        return rc;
                }

                if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
                        memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               page_address(BD_GET_ENC_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               BD_GET_KIOV(desc, i).kiov_len);
                }

                ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
                pt_nob += BD_GET_KIOV(desc, i).kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, zero out the remaining unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_src);
        gss_teardown_sgtable(&sg_dst);

        if (rc) {
                CERROR("failed to decrypt tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}

static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text is written directly in place */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                            arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid a compile warning */
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
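
/*
 * Worked example of the final wrap token length (illustrative numbers):
 * for aes128-cts-hmac-sha1-96, ke_conf_size = 16, ke_hash_size = 12, and
 * msg->len already padded to a multiple of the 16-byte blocksize, so
 *
 *   token->len = 16 (khdr) + (16 + msg->len + 16) (cipher) + 12 (cksum)
 *
 * which matches the LASSERT on the token buffer size above.
 */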

static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        int                  blocksize, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.
                 */
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               BD_GET_KIOV(desc, i).kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        (BD_GET_KIOV(desc, i).kiov_len +
                         blocksize - 1) & (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}
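
/*
 * gss_prep_bulk_kerberos() makes the encrypted kiovs block-aligned so
 * that krb5_encrypt_bulk()/krb5_decrypt_bulk() can work on whole cipher
 * blocks. Illustrative example: with a 16-byte AES block, a 1000-byte
 * kiov gets kiov_len = (1000 + 15) & ~15 = 1008 in the encrypted vector.
 */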

static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be a multiple
         * of blocksize. the bulk token size would be exactly
         * (sizeof(krb5_header) + blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
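
/*
 * In the bulk case the cipher part of the token holds only the head
 * (confounder) and tail (krb5 header); the pages themselves are
 * encrypted in place. E.g. for aes128-cts-hmac-sha1-96 (blocksize 16,
 * hash size 12, illustrative numbers), token->len = 16 + (16 + 16) + 12
 * = 60 bytes; the earlier LASSERT uses 16 as a rough bound for the
 * checksum part.
 */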

static
__u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                          rawobj_t        *gsshdr,
                          rawobj_t        *token,
                          rawobj_t        *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;
        __u8                 local_iv[16] = {0};

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n",
                       msg->len,
                       (int)(bodysize - ke->ke_conf_size - sizeof(*khdr)));
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *)(khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm,
                                            arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
                                       &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("failed to decrypt\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify that the krb5 header in the token was not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}

static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

int gss_display_kerberos(struct gss_ctx        *ctx,
                         char                  *buf,
                         int                    bufsize)
{
        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
        int                 written;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}
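
/*
 * Example output (illustrative): for an aes128 context this renders
 * "krb5 (aes128-cts-hmac-sha1-96)" into the supplied buffer.
 */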

static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};

static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
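
/*
 * The gm_oid below is the DER encoding of the Kerberos V5 GSS-API
 * mechanism OID 1.2.840.113554.1.2.2 (RFC 1964).
 */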
static struct gss_api_mech gss_kerberos_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "krb5",
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};

int __init init_kerberos_module(void)
{
        int status;

        spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}

void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}