LU-3289 gss: Add userspace support for GSS null and sk
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 *  linux/net/sunrpc/gss_krb5_mech.c
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *  linux/net/sunrpc/gss_krb5_seal.c
 *  linux/net/sunrpc/gss_krb5_seqnum.c
 *  linux/net/sunrpc/gss_krb5_unseal.c
 *
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
 *  J. Bruce Fields <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"
#include "gss_crypto.h"

static spinlock_t krb5_seq_lock;

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};

/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but for now we simply use CBC with padding, because Linux
 * does not support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                "des-cbc-md5",
                "cbc(des)",
                "md5",
                0,
                16,
                8,
                0,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                "des3-hmac-sha1",
                "cbc(des3_ede)",
                "hmac(sha1)",
                0,
                20,
                8,
                1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                "aes128-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                "aes256-cts-hmac-sha1-96",
                "cbc(aes)",
                "hmac(sha1)",
                0,
                12,
                16,
                1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                "arcfour-hmac-md5",
                "ecb(arc4)",
                "hmac(md5)",
                0,
                16,
                8,
                1,
        },
};

#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(struct krb5_enctype))

static const char *enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}
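
/*
 * Illustration only (a hedged sketch, not part of the build): how an
 * enctypes[] entry above maps onto the Linux crypto API used elsewhere
 * in this file. The entry index and field names come from the table;
 * the allocation calls mirror the ones in krb5_make_checksum() and
 * gss_wrap_kerberos().
 */
#if 0
static int example_alloc_tfms(void)
{
        struct krb5_enctype *ke = &enctypes[ENCTYPE_AES128_CTS_HMAC_SHA1_96];
        struct crypto_blkcipher *enc;
        struct crypto_hash *hash;

        enc = crypto_alloc_blkcipher(ke->ke_enc_name, 0, 0);    /* "cbc(aes)" */
        if (IS_ERR(enc))
                return PTR_ERR(enc);

        hash = crypto_alloc_hash(ke->ke_hash_name, 0, 0);       /* "hmac(sha1)" */
        if (IS_ERR(hash)) {
                crypto_free_blkcipher(enc);
                return PTR_ERR(hash);
        }

        /* ... use the tfms, then release them ... */
        crypto_free_hash(hash);
        crypto_free_blkcipher(enc);
        return 0;
}
#endif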

static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* tfm arc4 is stateful, callers must alloc-use-free it on their own */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* tfm hmac is stateful, callers must alloc-use-free it on their own */
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}

static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        gss_keyblock_free(&kctx->kc_keye);
        gss_keyblock_free(&kctx->kc_keyi);
        gss_keyblock_free(&kctx->kc_keyc);
}

static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
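
/*
 * For reference, the rfc1964-style context buffer parsed above is laid
 * out as follows (all fields in the order consumed by
 * import_context_rfc1964; this summary is derived from the parse order
 * in the function itself):
 *
 *   seed_init (u32) | seed | sign alg (u32) | seal alg (u32) | endtime |
 *   seq_send (u32) | mech oid (rawobj) |
 *   enc key:  enctype (u32) | keysize (u32) | keydata |
 *   seq key:  enctype (u32) | keysize (u32) | keydata
 */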

/* Flags for version 2 contexts */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
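
/*
 * For reference, the rfc4121-style (version 2) context buffer parsed
 * above is laid out as follows (derived from the parse order in the
 * function itself):
 *
 *   endtime | flags (u32) | seq_send (u64) | enctype (u32) |
 *   keysize (u32) | nkeys (u32, always 3) | ke | ki | kc
 */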

/*
 * The whole purpose here is to keep the user-level GSS context parsing
 * from nfs-utils as unchanged as possible. That code is not quite mature
 * yet, and many things are still unclear, e.g. Heimdal support.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only versions 0, 1 (rfc1964) and 2 (rfc4121) are supported */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}

static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long)((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}

/*
 * compute a (keyed or keyless) checksum over the plain text with the
 * krb5 wire token header appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct crypto_hash    *tfm;
        rawobj_t               hdr;
        __u32                  code = GSS_S_FAILURE;
        int                    rc;

        tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0);
        if (IS_ERR(tfm)) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = crypto_hash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (ke->ke_hash_hmac)
                rc = gss_digest_hmac(tfm, &kb->kb_key,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = gss_digest_norm(tfm, kb,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        crypto_free_hash(tfm);
        return code;
}

static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}

static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}
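
/*
 * For reference (RFC 4121, section 4.2.6): the 16-byte per-message token
 * header filled/verified above is laid out as
 *
 *   TOK_ID (2) | Flags (1) | Filler (1, 0xff) | EC (2) | RRC (2) | SND_SEQ (8)
 *
 * matching the kh_tok_id/kh_flags/kh_filler/kh_ec/kh_rrc/kh_seq fields of
 * struct krb5_header used here. For MIC tokens EC and RRC are 0xffff; for
 * wrap tokens this implementation keeps both at zero.
 */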

static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
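
/*
 * Hedged usage sketch (not part of the build): computing a MIC over a
 * single flat message with the routine above. Buffer sizes follow the
 * LASSERTs in gss_get_mic_kerberos(); the context would normally come
 * from gss_import_sec_context_kerberos().
 */
#if 0
static __u32 example_get_mic(struct gss_ctx *gctx, rawobj_t *msg)
{
        /* 20 bytes covers the largest ke_hash_size in enctypes[] */
        __u8 buf[sizeof(struct krb5_header) + 20];
        rawobj_t token = { .len = sizeof(buf), .data = buf };

        /* on success, token.len is trimmed to header + ke_hash_size */
        return gss_get_mic_kerberos(gctx, 1, msg, 0, NULL, &token);
}
#endif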

static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error encrypting confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("error encrypting page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error encrypting krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}

/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know the data size of each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
 *   this means we DO NOT support the case where the server sends an odd
 *   amount of data in a page which is not the last one.
 * - for server write: we know exactly the data size expected for each
 *   page, thus kiov_len is already accurate and we should not adjust it
 *   at all. bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
 *   round_up(bd_iov[]->kiov_len), which should have been done by
 *   prep_bulk().
 */
static
int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     ct_nob = 0, pt_nob = 0;
        int                     blocksize, i, rc;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(desc->bd_nob_transferred);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error decrypting confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize != 0 ||
                    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
                               BD_GET_ENC_KIOV(desc, i).kiov_len,
                               blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob_transferred)
                                BD_GET_ENC_KIOV(desc, i).kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        BD_GET_KIOV(desc, i).kiov_len =
                                BD_GET_ENC_KIOV(desc, i).kiov_len;
                        if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob)
                                BD_GET_KIOV(desc, i).kiov_len =
                                        desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
                                BD_GET_ENC_KIOV(desc, i).kiov_len);
                }

                if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
                        continue;

                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            BD_GET_ENC_KIOV(desc, i).kiov_len,
                            BD_GET_ENC_KIOV(desc, i).kiov_offset);
                dst = src;
                if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
                        sg_assign_page(&dst,
                                       BD_GET_KIOV(desc, i).kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("error decrypting page: %d\n", rc);
                        return rc;
                }

                if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
                        memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               page_address(BD_GET_ENC_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               BD_GET_KIOV(desc, i).kiov_len);
                }

                ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
                pt_nob += BD_GET_KIOV(desc, i).kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear the remaining unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_src);
        gss_teardown_sgtable(&sg_dst);

        if (rc) {
                CERROR("error decrypting tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}

static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be associated
         * with a tfm; currently that is only the case for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text is written directly in place */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                            arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, 1, 3, data_desc, &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid a compiler warning */
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, 0, 3, data_desc,
                                       &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
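
/*
 * Worked example of the final wrap token size (from the layout above):
 * token->len = sizeof(krb5_header) + cipher.len + ke_hash_size, where
 * cipher.len = ke_conf_size + padded msg->len + sizeof(krb5_header).
 * For aes128-cts (conf 16, hash 12, blocksize 16) and a 32-byte message
 * this gives 16 + (16 + 32 + 16) + 12 = 92 bytes.
 */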

static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        int                  blocksize, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side.
                 */
                if (BD_GET_KIOV(desc, i).kiov_offset % blocksize) {
                        CERROR("odd offset %d in page %d\n",
                               BD_GET_KIOV(desc, i).kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        (BD_GET_KIOV(desc, i).kiov_len +
                         blocksize - 1) & (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}

static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get the encryption blocksize. note kc_keye might not be associated
         * with a tfm; currently that is only the case for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) is a multiple of the
         * blocksize. the bulk token size is then exactly (sizeof(krb5_header)
         * + blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
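
/*
 * Worked example of the bulk wrap token size (from the LASSERTs above):
 * sizeof(krb5_header) + blocksize + sizeof(krb5_header) + ke_hash_size.
 * For aes128-cts (blocksize 16, hash 12) this is 16 + 16 + 16 + 12 = 60
 * bytes, independent of the bulk payload, which travels in the pages
 * themselves.
 */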

static
__u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                          rawobj_t        *gsshdr,
                          rawobj_t        *token,
                          rawobj_t        *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n",
                       msg->len,
                       (int)(bodysize - ke->ke_conf_size - sizeof(*khdr)));
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *)(khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm,
                                            arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, 1, 1, &cipher_in,
                                       &plain_out, 0);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, 0, 1, &cipher_in,
                                       &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("error decrypting\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify that the krb5 header in the token was not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}

static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

int gss_display_kerberos(struct gss_ctx        *ctx,
                         char                  *buf,
                         int                    bufsize)
{
        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
        int                 written;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}

static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};

static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};

/*
 * currently we leave the module owner NULL
 */
static struct gss_api_mech gss_kerberos_mech = {
        .gm_owner       = NULL, /*THIS_MODULE, */
        .gm_name        = "krb5",
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};

int __init init_kerberos_module(void)
{
        int status;

        spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}

void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}
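
/*
 * Hedged sketch (not part of the build): the init/cleanup routines above
 * are non-static and are assumed to be invoked from the surrounding GSS
 * module rather than registered here. A standalone module hookup would
 * look like the following.
 */
#if 0
module_init(init_kerberos_module);
module_exit(cleanup_kerberos_module);
#endif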