/* lustre/ptlrpc/gss/gss_krb5_mech.c */
/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 *  linux/net/sunrpc/gss_krb5_mech.c
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *  linux/net/sunrpc/gss_krb5_seal.c
 *  linux/net/sunrpc/gss_krb5_seqnum.c
 *  linux/net/sunrpc/gss_krb5_unseal.c
 *
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
 *  J. Bruce Fields <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"
#include "gss_crypto.h"

static spinlock_t krb5_seq_lock;

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};

/*
 * NOTE: for aes128-cts and aes256-cts the MIT implementation uses CTS
 * encryption, but for now we simply do CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                "des-cbc-md5",
                "cbc(des)",
                "md5",
                0,
                16,
                8,
                0,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                "des3-hmac-sha1",
                "cbc(des3_ede)",
                "hmac(sha1)",
                0,
                20,
                8,
                1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                "arcfour-hmac-md5",
                "ecb(arc4)",
                "hmac(md5)",
                0,
                16,
                8,
                1,
        },
};
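
/*
 * Note: the array above is indexed by the krb5 ENCTYPE_* numbers, and a
 * ke_hash_size of 12 for the aes-cts entries reflects the 96-bit
 * (12-byte) truncated HMAC implied by the "-96" suffix of those enctype
 * names.
 */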

#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(struct krb5_enctype))

static const char *enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}

static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* an arc4 tfm is stateful; the caller must alloc, use and free
         * it itself */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* an hmac tfm is stateful; the caller must alloc, use and free
         * it itself */
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}

static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        gss_keyblock_free(&kctx->kc_keye);
        gss_keyblock_free(&kctx->kc_keyi);
        gss_keyblock_free(&kctx->kc_keyc);
}

static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}

/* Flag bits of the version 2 context flags field */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}

/*
 * The whole purpose here is to keep the user-level gss context parsing
 * from nfs-utils as unchanged as we possibly can; it is not quite mature
 * yet, and much is still unclear, like heimdal support etc.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only versions 0, 1 and 2 are supported for the moment */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}

static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long)((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}

/*
 * compute a keyed (or keyless) checksum over the plain text with the
 * krb5 wire token header appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct crypto_hash    *tfm;
        rawobj_t               hdr;
        __u32                  code = GSS_S_FAILURE;
        int                    rc;

        /* crypto_alloc_hash() returns an ERR_PTR, not NULL, on failure */
        tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0);
        if (IS_ERR(tfm)) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (ke->ke_hash_hmac)
                rc = gss_digest_hmac(tfm, &kb->kb_key,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = gss_digest_norm(tfm, kb,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        crypto_free_hash(tfm);
        return code;
}

static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}
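
/*
 * For reference, the 16-byte header filled in above follows the RFC 4121
 * token layout (all multi-byte fields big-endian on the wire):
 *
 *   | tok_id (2) | flags (1) | filler 0xff (1) | ec (2) | rrc (2) | seq (8) |
 *
 * For wrap tokens ec/rrc start as 0 here (no extra count, no rotation);
 * for MIC tokens those four bytes are all-0xff filler, which is what the
 * 0xffff values above encode.
 */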

static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}

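/*
 * MIC token layout, produced by gss_get_mic_kerberos() below and checked
 * by gss_verify_mic_kerberos(): the krb5 header followed by the last
 * ke_hash_size bytes of the checksum:
 * ------------------------------------
 * | krb5 header | truncated checksum |
 * ------------------------------------
 */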
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}

/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text supposed to be.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know the data size of each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
 *   this means we DO NOT support the situation where the server sends
 *   an odd-size chunk of data in a page which is not the last one.
 * - for server write: we know exactly the data size of each page being
 *   expected, thus kiov_len is accurate already, so we should not adjust
 *   it at all. and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
 *   round_up(bd_iov[]->kiov_len), which should have been done by
 *   prep_bulk().
 */
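/*
 * Worked example of the adj_nob accounting below (illustrative numbers,
 * not taken from the code): with blocksize 16, PAGE_SIZE 4096, bd_nob
 * 9000 and bd_nob_transferred 9008 (9000 rounded up to whole blocks),
 * the three enc kiovs decrypt 4096 + 4096 + 816 bytes, and the plain
 * kiovs are adjusted to 4096 + 4096 + 808 so that pt_nob ends up equal
 * to bd_nob.
 */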
static
int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     ct_nob = 0, pt_nob = 0;
        int                     blocksize, i, rc;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(desc->bd_nob_transferred);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("failed to decrypt confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize != 0 ||
                    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
                               BD_GET_ENC_KIOV(desc, i).kiov_len,
                               blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob_transferred)
                                BD_GET_ENC_KIOV(desc, i).kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        BD_GET_KIOV(desc, i).kiov_len =
                                BD_GET_ENC_KIOV(desc, i).kiov_len;
                        if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob)
                                BD_GET_KIOV(desc, i).kiov_len =
                                        desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
                                BD_GET_ENC_KIOV(desc, i).kiov_len);
                }

                if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
                        continue;

                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            BD_GET_ENC_KIOV(desc, i).kiov_len,
                            BD_GET_ENC_KIOV(desc, i).kiov_offset);
                dst = src;
                if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
                        sg_assign_page(&dst,
                                       BD_GET_KIOV(desc, i).kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("failed to decrypt page: %d\n", rc);
                        return rc;
                }

                if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
                        memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               page_address(BD_GET_ENC_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               BD_GET_KIOV(desc, i).kiov_len);
                }

                ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
                pt_nob += BD_GET_KIOV(desc, i).kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear up the rest of the unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_src);
        gss_teardown_sgtable(&sg_dst);

        if (rc) {
                CERROR("failed to decrypt tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}

static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format (checksum length is ke_hash_size bytes):
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);


        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be
         * associated with a tfm; currently that is only the case for
         * arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text is generated directly in place */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                            arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid compile warning */
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}

static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        int                  blocksize, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.
                 */
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               BD_GET_KIOV(desc, i).kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        (BD_GET_KIOV(desc, i).kiov_len +
                         blocksize - 1) & (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}
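
/*
 * Note: the rounding above makes each bd_enc_vec[]->kiov_len a whole
 * number of cipher blocks, which is exactly what the non-adj_nob (server
 * write) path of krb5_decrypt_bulk() above expects prep_bulk() to have
 * done.
 */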

static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be
         * associated with a tfm; currently that is only the case for
         * arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) is a multiple of
         * blocksize. the bulk token size is then exactly
         * (sizeof(krb5_header) + blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
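
/*
 * Note: in the bulk case only the confounder block and a copy of the
 * krb5 header are carried in the token itself (cipher.len == blocksize +
 * sizeof(*khdr)); the page payload is encrypted in place into the
 * bd_enc_vec kiovs by krb5_encrypt_bulk() and travels with the bulk
 * transfer, covered by the trailing checksum.
 */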

static
__u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                          rawobj_t        *gsshdr,
                          rawobj_t        *token,
                          rawobj_t        *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;
        __u8                 local_iv[16] = {0};

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n", msg->len,
                       (int)(bodysize - ke->ke_conf_size - sizeof(*khdr)));
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *)(khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm,
                                            arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
                                       &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("failed to decrypt\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify the krb5 header in the token was not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}

static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

int gss_display_kerberos(struct gss_ctx        *ctx,
                         char                  *buf,
                         int                    bufsize)
{
        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
        int                 written;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}

static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};

static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};

/*
 * currently we leave the module owner NULL
 */
static struct gss_api_mech gss_kerberos_mech = {
        .gm_owner       = NULL, /*THIS_MODULE, */
        .gm_name        = "krb5",
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};
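
/*
 * For reference, the gm_oid bytes above are the DER-encoded value of the
 * Kerberos V5 GSS-API mechanism OID 1.2.840.113554.1.2.2.
 */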

int __init init_kerberos_module(void)
{
        int status;

        spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}

void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}