LU-8602 gss: Support GSS on linux 4.6+ kernels
[fs/lustre-release.git] lustre/ptlrpc/gss/gss_krb5_mech.c
/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 *  linux/net/sunrpc/gss_krb5_mech.c
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *  linux/net/sunrpc/gss_krb5_seal.c
 *  linux/net/sunrpc/gss_krb5_seqnum.c
 *  linux/net/sunrpc/gss_krb5_unseal.c
 *
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
 *  J. Bruce Fields <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"
#include "gss_crypto.h"

static spinlock_t krb5_seq_lock;

struct krb5_enctype {
        char           *ke_dispname;
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};

/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but for now we simply use CBC with padding, because linux
 * does not support CTS yet. This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "hmac(md5)",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};

#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(struct krb5_enctype))

static const char *enctype2str(__u32 enctype)
{
        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
                return enctypes[enctype].ke_dispname;

        return "unknown";
}
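
/*
 * Illustrative only: enctype2str(ENCTYPE_AES128_CTS_HMAC_SHA1_96) returns
 * "aes128-cts-hmac-sha1-96" from the table above, while any enctype outside
 * the table (or one without a ke_dispname) maps to "unknown".
 */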

static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* the arc4 tfm is stateful; callers must allocate, use and free
         * it themselves */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* the hmac tfm is stateful; callers must allocate, use and free
         * it themselves */
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}

static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        gss_keyblock_free(&kctx->kc_keye);
        gss_keyblock_free(&kctx->kc_keyi);
        gss_keyblock_free(&kctx->kc_keyc);
}

static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
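
/*
 * For reference (derived from the parsing above, not from a spec document),
 * the rfc1964-style context blob consumed by import_context_rfc1964() is a
 * flat byte stream laid out as:
 *
 *   u32  seed_init
 *   u8   seed[sizeof(kctx->kc_seed)]
 *   u32  sign_alg                    (ignored)
 *   u32  seal_alg                    (ignored)
 *        endtime
 *   u32  seq_send
 *   u32  mech_oid length, followed by that many OID bytes
 *   u32  enctype, u32 keysize, keysize key bytes   (enc key -> kc_keye)
 *   u32  enctype, u32 keysize, keysize key bytes   (seq key -> kc_keyc,
 *                                                   dup'd into kc_keyi)
 */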

/* Flags for version 2 contexts */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
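
/*
 * For reference (again derived from the parsing above), the version 2
 * (rfc4121-style) context blob is laid out as:
 *
 *        endtime
 *   u32  flags          (KRB5_CTX_FLAG_* bits)
 *        seq_send
 *   u32  enctype
 *   u32  keysize
 *   u32  number of keys (must be 3)
 *   3 x  keysize key bytes: ke (encryption), ki (integrity), kc (checksum)
 */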

/*
 * The whole purpose here is to keep the user-level gss context parsing from
 * nfs-utils as unchanged as possible; those tools are not quite mature yet,
 * and many things are still unclear, e.g. heimdal support.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Failed to read version\n");
                return GSS_S_FAILURE;
        }

        /* only versions 0, 1 and 2 are supported for the moment */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}

static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long)((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}

/*
 * compute a (keyed/keyless) checksum over the plain text, to which the
 * krb5 wire token header is appended.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype   *ke = &enctypes[enctype];
        struct crypto_ahash   *tfm;
        rawobj_t               hdr;
        __u32                  code = GSS_S_FAILURE;
        int                    rc;

        tfm = crypto_alloc_ahash(ke->ke_hash_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                return GSS_S_FAILURE;
        }

        cksum->len = crypto_ahash_digestsize(tfm);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                goto out_tfm;
        }

        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (ke->ke_hash_hmac)
                rc = gss_digest_hmac(tfm, &kb->kb_key,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
        else
                rc = gss_digest_norm(tfm, kb,
                                     &hdr, msgcnt, msgs, iovcnt, iovs, cksum);

        if (rc == 0)
                code = GSS_S_COMPLETE;
out_tfm:
        crypto_free_ahash(tfm);
        return code;
}

static void fill_krb5_header(struct krb5_ctx *kctx,
                             struct krb5_header *khdr,
                             int privacy)
{
        unsigned char acceptor_flag;

        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;

        if (privacy) {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
                khdr->kh_ec = cpu_to_be16(0);
                khdr->kh_rrc = cpu_to_be16(0);
        } else {
                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
                khdr->kh_flags = acceptor_flag;
                khdr->kh_ec = cpu_to_be16(0xffff);
                khdr->kh_rrc = cpu_to_be16(0xffff);
        }

        khdr->kh_filler = 0xff;
        spin_lock(&krb5_seq_lock);
        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
        spin_unlock(&krb5_seq_lock);
}
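
/*
 * For orientation, struct krb5_header mirrors the 16-byte RFC 4121 token
 * header that fill_krb5_header()/verify_krb5_header() operate on:
 *
 *   kh_tok_id  (2 bytes, big endian)  KG_TOK_MIC_MSG or KG_TOK_WRAP_MSG
 *   kh_flags   (1 byte)               FLAG_SENDER_IS_ACCEPTOR, ...
 *   kh_filler  (1 byte)               always 0xff
 *   kh_ec      (2 bytes, big endian)  extra count; 0xffff for MIC tokens
 *   kh_rrc     (2 bytes, big endian)  rotation count; 0xffff for MIC tokens
 *   kh_seq     (8 bytes, big endian)  per-context send sequence number
 */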

static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}

static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
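
/*
 * Resulting MIC token layout, from the code above (sizes illustrative,
 * e.g. ke_hash_size = 12 for the aes*-cts-hmac-sha1-96 enctypes):
 *
 *   ---------------------------------------------------------
 *   | krb5 header (16 bytes) | checksum tail (ke_hash_size) |
 *   ---------------------------------------------------------
 *
 * i.e. a 28-byte token for the aes enctypes.
 */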

static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32                major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error encrypting confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error encrypting page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error encrypting krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
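
/*
 * Note (derived from the asserts above): the "cipher" rawobj passed to
 * krb5_encrypt_bulk()/krb5_decrypt_bulk() is only the small head/tail
 * buffer, exactly blocksize + sizeof(struct krb5_header) bytes:
 *
 *   cipher->data + 0          encrypted confounder  (blocksize bytes)
 *   cipher->data + blocksize  encrypted krb5 header (16 bytes)
 *
 * while the bulk pages themselves are encrypted into the bd_enc_vec kiov
 * array.
 */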

/*
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the target size of the plain text expected.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know the data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
 *   might be smaller, so we need to adjust it according to
 *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
 *   this means we DO NOT support the situation where the server sends an
 *   odd-sized chunk of data in a page which is not the last one.
 * - for server write: we know exactly the data size expected for each page,
 *   thus kiov_len is already accurate and we should not adjust it at all.
 *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
 *   round_up(bd_iov[]->kiov_len), which should have been done by
 *   prep_bulk().
 */
static
int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     ct_nob = 0, pt_nob = 0;
        int                     blocksize, i, rc;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(desc->bd_nob_transferred);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error decrypting confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
                    != 0 ||
                    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
                    != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
                               BD_GET_ENC_KIOV(desc, i).kiov_len,
                               blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob_transferred)
                                BD_GET_ENC_KIOV(desc, i).kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        BD_GET_KIOV(desc, i).kiov_len =
                          BD_GET_ENC_KIOV(desc, i).kiov_len;
                        if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
                            desc->bd_nob)
                                BD_GET_KIOV(desc, i).kiov_len =
                                  desc->bd_nob - pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
                                kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
                                BD_GET_ENC_KIOV(desc, i).kiov_len);
                }

                if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
                        continue;

                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            BD_GET_ENC_KIOV(desc, i).kiov_len,
                            BD_GET_ENC_KIOV(desc, i).kiov_offset);
                dst = src;
                if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
                        sg_assign_page(&dst,
                                       BD_GET_KIOV(desc, i).kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                 src.length);
                if (rc) {
                        CERROR("error decrypting page: %d\n", rc);
                        return rc;
                }

                if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
                        memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               page_address(BD_GET_ENC_KIOV(desc, i).
                                            kiov_page) +
                               BD_GET_KIOV(desc, i).kiov_offset,
                               BD_GET_KIOV(desc, i).kiov_len);
                }

                ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
                pt_nob += BD_GET_KIOV(desc, i).kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear up the rest of the unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_src);
        gss_teardown_sgtable(&sg_dst);

        if (rc) {
                CERROR("error decrypting tail: %d\n", rc);
                return rc;
        }

        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}

static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ------------------------------------------------------------
         * | krb5 header | cipher text | checksum (ke_hash_size bytes) |
         * ------------------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* pad the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text is written directly in place */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while (0); /* just to avoid a compile warning */
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
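
/*
 * Worked example (illustrative, using the aes128-cts table entry above:
 * blocksize 16, ke_conf_size 16, ke_hash_size 12): a message padded to
 * N bytes yields
 *
 *   cipher.len = ke_conf_size + N + sizeof(krb5_header) = 16 + N + 16
 *   token->len = sizeof(krb5_header) + cipher.len + ke_hash_size
 *              = 16 + (32 + N) + 12 = N + 60
 */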

static
__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        int                  blocksize, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));
        LASSERT(kctx->kc_keye.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
                /*
                 * offset should always start at page boundary of either
                 * client or server side. mask with (blocksize - 1) to
                 * test block alignment.
                 */
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("odd offset %d in page %d\n",
                               BD_GET_KIOV(desc, i).kiov_offset, i);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        (BD_GET_KIOV(desc, i).kiov_len +
                         blocksize - 1) & (~(blocksize - 1));
        }

        return GSS_S_COMPLETE;
}
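
/*
 * Illustrative numbers for the rounding above: with an aes tfm
 * (blocksize 16), a kiov_len of 1000 becomes an enc kiov_len of
 * (1000 + 15) & ~15 = 1008, so every encrypted fragment covers a whole
 * number of cipher blocks.
 */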

static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not be associated
         * with a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
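
/*
 * Illustrative bulk token size, following the asserts above: the token
 * carries only the head/tail cipher text, so with an aes tfm
 * (blocksize 16, ke_hash_size 12)
 *
 *   token->len = sizeof(krb5_header) + (blocksize + sizeof(krb5_header))
 *                + ke_hash_size
 *              = 16 + (16 + 16) + 12 = 60 bytes
 *
 * regardless of how many bulk pages are wrapped.
 */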

static
__u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                          rawobj_t        *gsshdr,
                          rawobj_t        *token,
                          rawobj_t        *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;
        __u8                 local_iv[16] = {0};

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                CERROR("buffer too small: %u, require %d\n", msg->len,
                       (int)(bodysize - ke->ke_conf_size - sizeof(*khdr)));
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *)(khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm,
                                         arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
                                       &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("decryption failed\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify that the krb5 header in the token was not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}

static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}

int gss_display_kerberos(struct gss_ctx        *ctx,
                         char                  *buf,
                         int                    bufsize)
{
        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
        int                 written;

        written = snprintf(buf, bufsize, "krb5 (%s)",
                           enctype2str(kctx->kc_enctype));
        return written;
}

static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};

static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};

static struct gss_api_mech gss_kerberos_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "krb5",
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};
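
/*
 * Side note: the 9 octal-escaped bytes in .gm_oid above are the DER
 * encoding of the Kerberos 5 GSS-API mechanism OID 1.2.840.113554.1.2.2
 * (0x2a 0x86 0x48 0x86 0xf7 0x12 0x01 0x02 0x02).
 */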

int __init init_kerberos_module(void)
{
        int status;

        spin_lock_init(&krb5_seq_lock);

        status = lgss_mech_register(&gss_kerberos_mech);
        if (status)
                CERROR("Failed to register kerberos gss mechanism!\n");
        return status;
}

void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}