Whamcloud - gitweb
305402d9e1c57d0e9e097b48fc109bbcd45c19c9
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, 2015, Intel Corporation.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/slab.h>
55 #include <linux/crypto.h>
56 #include <linux/mutex.h>
57
58 #include <obd.h>
59 #include <obd_class.h>
60 #include <obd_support.h>
61 #include <lustre_net.h>
62 #include <lustre_import.h>
63 #include <lustre_sec.h>
64
65 #include "gss_err.h"
66 #include "gss_internal.h"
67 #include "gss_api.h"
68 #include "gss_asn1.h"
69 #include "gss_krb5.h"
70 #include "gss_crypto.h"
71
/* Serializes allocation of the per-context send sequence number in
 * fill_krb5_header().  Statically initialized so the lock is valid
 * even before any module-init code has run. */
static DEFINE_SPINLOCK(krb5_seq_lock);
73
/*
 * Per-enctype parameters: linux crypto transform names/modes plus the
 * checksum and confounder sizes used for each supported krb5
 * encryption type.
 */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable name */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
83
/*
 * NOTE: for aes128-cts and aes256-cts, MIT implementation use CTS encryption.
 * but currently we simply CBC with padding, because linux doesn't support CTS
 * yet. this need to be fixed in the future.
 *
 * The table is indexed directly by the krb5 ENCTYPE_* value; slots with
 * a zero ke_hash_size are treated as unsupported (see krb5_init_keys()).
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};
130
131 static const char * enctype2str(__u32 enctype)
132 {
133         if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
134                 return enctypes[enctype].ke_dispname;
135
136         return "unknown";
137 }
138
/*
 * Allocate the cipher transforms for the keyblocks already loaded into
 * @kctx (kc_keye/kc_keyi/kc_keyc), according to kc_enctype.
 *
 * Returns 0 on success, -1 on an unsupported enctype or tfm allocation
 * failure.  For hmac-based enctypes the integrity/checksum keyblocks
 * get no cipher tfm here; likewise no tfm is set up for arcfour, since
 * those tfms are stateful and must be allocated per use.
 */
static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        /* a zero ke_hash_size marks a hole in the enctypes[] table */
        if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* tfm arc4 is stateful, user should alloc-use-free by his own */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* tfm hmac is stateful, user should alloc-use-free by his own */
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}
167
/*
 * Release everything referenced by @kctx: the mech OID copy and the
 * three keyblocks (kc_keye/kc_keyi/kc_keyc).  The context structure
 * itself is left for the caller to free.
 */
static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        gss_keyblock_free(&kctx->kc_keye);
        gss_keyblock_free(&kctx->kc_keyi);
        gss_keyblock_free(&kctx->kc_keyc);
}
177
/*
 * Parse a version 0/1 (rfc1964-style) context blob handed down by the
 * user level gss daemon into @kctx.  @p..@end bounds the blob, which
 * must be consumed exactly.
 *
 * Returns 0 on success, GSS_S_FAILURE on any parse error.
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: its enctype and size must match the enc key's */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: reuse the seq key as the integrity key */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* trailing garbage means a malformed blob */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
256
257 /* Flags for version 2 context flags */
258 #define KRB5_CTX_FLAG_INITIATOR         0x00000001
259 #define KRB5_CTX_FLAG_CFX               0x00000002
260 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
261
/*
 * Parse a version 2 (rfc4121-style) context blob handed down by the
 * user level gss daemon into @kctx.  @p..@end bounds the blob.
 *
 * Returns 0 on success, GSS_S_FAILURE on any parse error.
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke - encryption key */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki - integrity key */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc - checksum key */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
324
/*
 * The whole purpose here is trying to keep user level gss context parsing
 * from nfs-utils unchanged as possible as we can, they are not quite mature
 * yet, and many stuff still not clear, like heimdal etc.
 *
 * On success @gctx->internal_ctx_id takes ownership of the freshly
 * allocated krb5_ctx; on failure everything is cleaned up here.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        /* first word is the context blob format version */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* versions 0/1 use the rfc1964 layout, version 2 the rfc4121 one */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* for v0/v1 the version word doubles as the initiator flag */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
374
/*
 * Build a reverse-direction context from an existing one: same keys,
 * enctype and expiry, but with the initiator flag flipped and the
 * send/receive sequence numbers swapped.
 */
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        /* directions are mirrored, so swap the sequence counters */
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        /* the dup'ed keyblocks still need their own cipher tfms */
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
418
419 static
420 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
421                                    time64_t *endtime)
422 {
423         struct krb5_ctx *kctx = gctx->internal_ctx_id;
424
425         *endtime = kctx->kc_endtime;
426         return GSS_S_COMPLETE;
427 }
428
/* Tear down a kerberos context: free its contents, then the context. */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *ctx = internal_ctx;

        delete_context_kerberos(ctx);
        OBD_FREE_PTR(ctx);
}
437
/*
 * compute (keyed/keyless) checksum against the plain text which appended
 * with krb5 wire token header.
 *
 * The checksum buffer is allocated here (cksum->data/cksum->len); on
 * success the caller owns it and must rawobj_free() it.  For non-hmac
 * enctypes the keyless digest is additionally encrypted in place with
 * kb->kb_tfm.
 *
 * Returns GSS_S_COMPLETE or GSS_S_FAILURE.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct cfs_crypto_hash_desc *desc = NULL;
        enum cfs_crypto_hash_alg hash_algo;
        rawobj_t hdr;
        int rc;

        hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);

        /* For the cbc(des) case we want md5 instead of hmac(md5) */
        if (strcmp(ke->ke_enc_name, "cbc(des)"))
                desc = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
                                           kb->kb_key.len);
        else
                desc = cfs_crypto_hash_init(hash_algo, NULL, 0);
        if (IS_ERR(desc)) {
                rc = PTR_ERR(desc);
                CERROR("failed to alloc hash %s : rc = %d\n",
                       ke->ke_hash_name, rc);
                goto out_no_hash;
        }

        cksum->len = cfs_crypto_hash_digestsize(hash_algo);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                rc = -ENOMEM;
                goto out_free_hash;
        }

        /* the token header is hashed first, then the message pieces */
        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        rc = gss_digest_hash(desc, &hdr, msgcnt, msgs,
                             iovcnt, iovs, cksum);
        if (rc)
                goto out_free_hash;

        if (!ke->ke_hash_hmac) {
                LASSERT(kb->kb_tfm);

                /* keyless digest: finalize, then encrypt the digest with
                 * the key to make it keyed (desc is consumed by final) */
                cfs_crypto_hash_final(desc, cksum->data, &cksum->len);
                rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
                                       cksum->data, cksum->data,
                                       cksum->len);
                goto out_no_hash;
        }

out_free_hash:
        /* NOTE(review): on the -ENOMEM path cksum->data is NULL here;
         * presumably cfs_crypto_hash_final() treats a NULL output buffer
         * as "free the descriptor only" - confirm against libcfs */
        if (desc)
                cfs_crypto_hash_final(desc, cksum->data, &cksum->len);
out_no_hash:
        return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
503
504 static void fill_krb5_header(struct krb5_ctx *kctx,
505                              struct krb5_header *khdr,
506                              int privacy)
507 {
508         unsigned char acceptor_flag;
509
510         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
511
512         if (privacy) {
513                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
514                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
515                 khdr->kh_ec = cpu_to_be16(0);
516                 khdr->kh_rrc = cpu_to_be16(0);
517         } else {
518                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
519                 khdr->kh_flags = acceptor_flag;
520                 khdr->kh_ec = cpu_to_be16(0xffff);
521                 khdr->kh_rrc = cpu_to_be16(0xffff);
522         }
523
524         khdr->kh_filler = 0xff;
525         spin_lock(&krb5_seq_lock);
526         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
527         spin_unlock(&krb5_seq_lock);
528 }
529
530 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
531                                 struct krb5_header *khdr,
532                                 int privacy)
533 {
534         unsigned char acceptor_flag;
535         __u16         tok_id, ec_rrc;
536
537         acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
538
539         if (privacy) {
540                 tok_id = KG_TOK_WRAP_MSG;
541                 ec_rrc = 0x0;
542         } else {
543                 tok_id = KG_TOK_MIC_MSG;
544                 ec_rrc = 0xffff;
545         }
546
547         /* sanity checks */
548         if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
549                 CERROR("bad token id\n");
550                 return GSS_S_DEFECTIVE_TOKEN;
551         }
552         if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
553                 CERROR("bad direction flag\n");
554                 return GSS_S_BAD_SIG;
555         }
556         if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
557                 CERROR("missing confidential flag\n");
558                 return GSS_S_BAD_SIG;
559         }
560         if (khdr->kh_filler != 0xff) {
561                 CERROR("bad filler\n");
562                 return GSS_S_DEFECTIVE_TOKEN;
563         }
564         if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
565             be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
566                 CERROR("bad EC or RRC\n");
567                 return GSS_S_DEFECTIVE_TOKEN;
568         }
569         return GSS_S_COMPLETE;
570 }
571
/*
 * Produce a MIC token into @token: the krb5 header followed by the
 * trailing ke_hash_size bytes of the keyed checksum over header plus
 * message data.  token->len must be large enough on entry and is set
 * to the actual token size on success.
 */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum covers the header and all message pieces */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
                               msgcnt, msgs, iovcnt, iovs, &cksum))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* only the last ke_hash_size bytes of the digest go on the wire */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
607
/*
 * Verify a MIC token produced by gss_get_mic_kerberos(): check the
 * header, recompute the keyed checksum over header plus message data
 * and compare it with the checksum carried in the token.
 */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                goto out;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                GOTO(out, major = GSS_S_FAILURE);
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        }

        /* compare against the trailing ke_hash_size digest bytes, the
         * same truncation used when the token was generated */
        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
        }
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
out:
        return major;
}
659
/*
 * Encrypt a bulk descriptor: the confounder and then the krb5 header
 * are encrypted into @cipher (blocksize + sizeof(*khdr) bytes), and
 * each clear data page is encrypted into the matching enc kiov, all
 * chained under a single zero IV.
 *
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                /* round each page's length up to a whole number of
                 * cipher blocks.  NOTE(review): the mask-based round-up
                 * assumes blocksize is a power of two - presumably true
                 * for the block ciphers in enctypes[]; confirm */
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                /* record the padded geometry in the enc kiov for the
                 * transfer layer */
                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
764
/*
 * desc->bd_nob_transferred is the size of cipher text received.
 * desc->bd_nob is the target size of plain text supposed to be.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
 *   be smaller, so we need to adjust it according to
 *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
 *   this means we DO NOT support the situation where the server sends an
 *   odd-sized piece of data in a page which is not the last one.
 * - for server write: we know the exact data size for each page being
 *   expected, thus kiov_len is accurate already, so we should not adjust
 *   it at all. and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
 *   round_up(bd_iov[]->kiov_len), which
 *   should have been done by prep_bulk().
 */
783 static
784 int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
785                       struct krb5_header *khdr,
786                       struct ptlrpc_bulk_desc *desc,
787                       rawobj_t *cipher,
788                       rawobj_t *plain,
789                       int adj_nob)
790 {
791         struct blkcipher_desc   ciph_desc;
792         __u8                    local_iv[16] = {0};
793         struct scatterlist      src, dst;
794         struct sg_table         sg_src, sg_dst;
795         int                     ct_nob = 0, pt_nob = 0;
796         int                     blocksize, i, rc;
797
798         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
799         LASSERT(desc->bd_iov_count);
800         LASSERT(GET_ENC_KIOV(desc));
801         LASSERT(desc->bd_nob_transferred);
802
803         blocksize = crypto_blkcipher_blocksize(tfm);
804         LASSERT(blocksize > 1);
805         LASSERT(cipher->len == blocksize + sizeof(*khdr));
806
807         ciph_desc.tfm  = tfm;
808         ciph_desc.info = local_iv;
809         ciph_desc.flags = 0;
810
811         if (desc->bd_nob_transferred % blocksize) {
812                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
813                 return -EPROTO;
814         }
815
816         /* decrypt head (confounder) */
817         rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
818         if (rc != 0)
819                 return rc;
820
821         rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
822         if (rc != 0) {
823                 gss_teardown_sgtable(&sg_src);
824                 return rc;
825         }
826
827         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
828                                          sg_src.sgl, blocksize);
829
830         gss_teardown_sgtable(&sg_dst);
831         gss_teardown_sgtable(&sg_src);
832
833         if (rc) {
834                 CERROR("error to decrypt confounder: %d\n", rc);
835                 return rc;
836         }
837
838         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
839              i++) {
840                 if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
841                     != 0 ||
842                     BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
843                     != 0) {
844                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
845                                i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
846                                BD_GET_ENC_KIOV(desc, i).kiov_len,
847                                blocksize);
848                         return -EFAULT;
849                 }
850
851                 if (adj_nob) {
852                         if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
853                             desc->bd_nob_transferred)
854                                 BD_GET_ENC_KIOV(desc, i).kiov_len =
855                                         desc->bd_nob_transferred - ct_nob;
856
857                         BD_GET_KIOV(desc, i).kiov_len =
858                           BD_GET_ENC_KIOV(desc, i).kiov_len;
859                         if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
860                             desc->bd_nob)
861                                 BD_GET_KIOV(desc, i).kiov_len =
862                                   desc->bd_nob - pt_nob;
863                 } else {
864                         /* this should be guaranteed by LNET */
865                         LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
866                                 kiov_len <=
867                                 desc->bd_nob_transferred);
868                         LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
869                                 BD_GET_ENC_KIOV(desc, i).kiov_len);
870                 }
871
872                 if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
873                         continue;
874
875                 sg_init_table(&src, 1);
876                 sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
877                             BD_GET_ENC_KIOV(desc, i).kiov_len,
878                             BD_GET_ENC_KIOV(desc, i).kiov_offset);
879                 dst = src;
880                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
881                         sg_assign_page(&dst,
882                                        BD_GET_KIOV(desc, i).kiov_page);
883
884                 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
885                                                  src.length);
886                 if (rc) {
887                         CERROR("error to decrypt page: %d\n", rc);
888                         return rc;
889                 }
890
891                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
892                         memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
893                                BD_GET_KIOV(desc, i).kiov_offset,
894                                page_address(BD_GET_ENC_KIOV(desc, i).
895                                             kiov_page) +
896                                BD_GET_KIOV(desc, i).kiov_offset,
897                                BD_GET_KIOV(desc, i).kiov_len);
898                 }
899
900                 ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
901                 pt_nob += BD_GET_KIOV(desc, i).kiov_len;
902         }
903
904         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
905                 CERROR("%d cipher text transferred but only %d decrypted\n",
906                        desc->bd_nob_transferred, ct_nob);
907                 return -EFAULT;
908         }
909
910         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
911                 CERROR("%d plain text expected but only %d received\n",
912                        desc->bd_nob, pt_nob);
913                 return -EFAULT;
914         }
915
916         /* if needed, clear up the rest unused iovs */
917         if (adj_nob)
918                 while (i < desc->bd_iov_count)
919                         BD_GET_KIOV(desc, i++).kiov_len = 0;
920
921         /* decrypt tail (krb5 header) */
922         rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
923                                sizeof(*khdr));
924         if (rc != 0)
925                 return rc;
926
927         rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
928                                sizeof(*khdr));
929         if (rc != 0) {
930                 gss_teardown_sgtable(&sg_src);
931                 return rc;
932         }
933
934         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
935                                          sizeof(*khdr));
936
937         gss_teardown_sgtable(&sg_src);
938         gss_teardown_sgtable(&sg_dst);
939
940         if (rc) {
941                 CERROR("error to decrypt tail: %d\n", rc);
942                 return rc;
943         }
944
945         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
946                 CERROR("krb5 header doesn't match\n");
947                 return -EACCES;
948         }
949
950         return 0;
951 }
952
/*
 * Wrap (encrypt and integrity-protect) one RPC message with the krb5
 * privacy service.
 *
 * \param gctx        GSS context; internal_ctx_id points to a krb5_ctx
 * \param gsshdr      GSS header bytes; covered by the checksum only,
 *                    never encrypted
 * \param msg         plain message; padded in place up to the cipher
 *                    blocksize (buffer capacity given by msg_buflen)
 * \param msg_buflen  capacity of msg->data, bounds gss_add_padding()
 * \param token       pre-sized output buffer, filled with
 *                    | krb5 header | cipher text | checksum |
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise.
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        u32 major;
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption (note: gss header is NOT
         * encrypted, only checksummed above):
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
                struct crypto_blkcipher *arc4_tfm;

                /* arcfour derives a one-off encryption key from the
                 * checksum, then encrypts with ecb(arc4) */
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum: only the last ke_hash_size bytes of the
         * computed checksum are transmitted */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1094
1095 static
1096 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1097                              struct ptlrpc_bulk_desc *desc)
1098 {
1099         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1100         int                  blocksize, i;
1101
1102         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1103         LASSERT(desc->bd_iov_count);
1104         LASSERT(GET_ENC_KIOV(desc));
1105         LASSERT(kctx->kc_keye.kb_tfm);
1106
1107         blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1108
1109         for (i = 0; i < desc->bd_iov_count; i++) {
1110                 LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
1111                 /*
1112                  * offset should always start at page boundary of either
1113                  * client or server side.
1114                  */
1115                 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
1116                         CERROR("odd offset %d in page %d\n",
1117                                BD_GET_KIOV(desc, i).kiov_offset, i);
1118                         return GSS_S_FAILURE;
1119                 }
1120
1121                 BD_GET_ENC_KIOV(desc, i).kiov_offset =
1122                         BD_GET_KIOV(desc, i).kiov_offset;
1123                 BD_GET_ENC_KIOV(desc, i).kiov_len =
1124                         (BD_GET_KIOV(desc, i).kiov_len +
1125                          blocksize - 1) & (~(blocksize - 1));
1126         }
1127
1128         return GSS_S_COMPLETE;
1129 }
1130
/*
 * Wrap a bulk transfer with the krb5 privacy service: the bulk pages are
 * encrypted in place (via krb5_encrypt_bulk), while the returned token
 * carries the krb5 header, the encrypted confounder/header block, and the
 * truncated checksum over the pages.
 *
 * \param gctx     GSS context; internal_ctx_id points to a krb5_ctx
 * \param desc     bulk descriptor; pages are both checksummed and
 *                 encrypted in place
 * \param token    pre-sized output buffer, filled with
 *                 | krb5 header | head/tail cipher text | checksum |
 * \param adj_nob  nonzero to let krb5_encrypt_bulk adjust kiov lengths
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise.
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int rc = 0;
        u32 major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         * (the pages themselves are passed separately via GET_KIOV below)
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* head/tail cipher text lands directly after the token header */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour is not supported for bulk privacy */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum: only the last ke_hash_size bytes are sent */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1242
1243 static
1244 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1245                           rawobj_t        *gsshdr,
1246                           rawobj_t        *token,
1247                           rawobj_t        *msg)
1248 {
1249         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1250         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1251         struct krb5_header  *khdr;
1252         unsigned char       *tmpbuf;
1253         int                  blocksize, bodysize;
1254         rawobj_t             cksum = RAWOBJ_EMPTY;
1255         rawobj_t             cipher_in, plain_out;
1256         rawobj_t             hash_objs[3];
1257         int                  rc = 0;
1258         __u32                major;
1259         __u8                 local_iv[16] = {0};
1260
1261         LASSERT(ke);
1262
1263         if (token->len < sizeof(*khdr)) {
1264                 CERROR("short signature: %u\n", token->len);
1265                 return GSS_S_DEFECTIVE_TOKEN;
1266         }
1267
1268         khdr = (struct krb5_header *)token->data;
1269
1270         major = verify_krb5_header(kctx, khdr, 1);
1271         if (major != GSS_S_COMPLETE) {
1272                 CERROR("bad krb5 header\n");
1273                 return major;
1274         }
1275
1276         /* block size */
1277         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1278                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1279                 blocksize = 1;
1280         } else {
1281                 LASSERT(kctx->kc_keye.kb_tfm);
1282                 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1283         }
1284
1285         /* expected token layout:
1286          * ----------------------------------------
1287          * | krb5 header | cipher text | checksum |
1288          * ----------------------------------------
1289          */
1290         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1291
1292         if (bodysize % blocksize) {
1293                 CERROR("odd bodysize %d\n", bodysize);
1294                 return GSS_S_DEFECTIVE_TOKEN;
1295         }
1296
1297         if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1298                 CERROR("incomplete token: bodysize %d\n", bodysize);
1299                 return GSS_S_DEFECTIVE_TOKEN;
1300         }
1301
1302         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1303                 CERROR("buffer too small: %u, require %d\n",
1304                        msg->len, bodysize - ke->ke_conf_size);
1305                 return GSS_S_FAILURE;
1306         }
1307
1308         /* decrypting */
1309         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1310         if (!tmpbuf)
1311                 return GSS_S_FAILURE;
1312
1313         major = GSS_S_FAILURE;
1314
1315         cipher_in.data = (__u8 *)(khdr + 1);
1316         cipher_in.len = bodysize;
1317         plain_out.data = tmpbuf;
1318         plain_out.len = bodysize;
1319
1320         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1321                 rawobj_t                 arc4_keye;
1322                 struct crypto_blkcipher *arc4_tfm;
1323
1324                 cksum.data = token->data + token->len - ke->ke_hash_size;
1325                 cksum.len = ke->ke_hash_size;
1326
1327                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1328                                        NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1329                         CERROR("failed to obtain arc4 enc key\n");
1330                         GOTO(arc4_out, rc = -EACCES);
1331                 }
1332
1333                 arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1334                 if (IS_ERR(arc4_tfm)) {
1335                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1336                         GOTO(arc4_out_key, rc = -EACCES);
1337                 }
1338
1339                 if (crypto_blkcipher_setkey(arc4_tfm,
1340                                          arc4_keye.data, arc4_keye.len)) {
1341                         CERROR("failed to set arc4 key, len %d\n",
1342                                arc4_keye.len);
1343                         GOTO(arc4_out_tfm, rc = -EACCES);
1344                 }
1345
1346                 rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
1347                                        &plain_out, 0);
1348 arc4_out_tfm:
1349                 crypto_free_blkcipher(arc4_tfm);
1350 arc4_out_key:
1351                 rawobj_free(&arc4_keye);
1352 arc4_out:
1353                 cksum = RAWOBJ_EMPTY;
1354         } else {
1355                 rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
1356                                        &cipher_in, &plain_out, 0);
1357         }
1358
1359         if (rc != 0) {
1360                 CERROR("error decrypt\n");
1361                 goto out_free;
1362         }
1363         LASSERT(plain_out.len == bodysize);
1364
1365         /* expected clear text layout:
1366          * -----------------------------------------
1367          * | confounder | clear msgs | krb5 header |
1368          * -----------------------------------------
1369          */
1370
1371         /* verify krb5 header in token is not modified */
1372         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1373                    sizeof(*khdr))) {
1374                 CERROR("decrypted krb5 header mismatch\n");
1375                 goto out_free;
1376         }
1377
1378         /* verify checksum, compose clear text as layout:
1379          * ------------------------------------------------------
1380          * | confounder | gss header | clear msgs | krb5 header |
1381          * ------------------------------------------------------
1382          */
1383         hash_objs[0].len = ke->ke_conf_size;
1384         hash_objs[0].data = plain_out.data;
1385         hash_objs[1].len = gsshdr->len;
1386         hash_objs[1].data = gsshdr->data;
1387         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1388         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1389         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1390                                khdr, 3, hash_objs, 0, NULL, &cksum))
1391                 goto out_free;
1392
1393         LASSERT(cksum.len >= ke->ke_hash_size);
1394         if (memcmp((char *)(khdr + 1) + bodysize,
1395                    cksum.data + cksum.len - ke->ke_hash_size,
1396                    ke->ke_hash_size)) {
1397                 CERROR("checksum mismatch\n");
1398                 goto out_free;
1399         }
1400
1401         msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
1402         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1403
1404         major = GSS_S_COMPLETE;
1405 out_free:
1406         OBD_FREE_LARGE(tmpbuf, bodysize);
1407         rawobj_free(&cksum);
1408         return major;
1409 }
1410
/*
 * Unwrap a krb5-privacy-protected bulk transfer: decrypt the head/tail
 * cipher text in the token and the bulk pages in place (via
 * krb5_decrypt_bulk), then verify the trailing checksum.
 *
 * \param gctx     GSS context; internal_ctx_id points to a krb5_ctx
 * \param desc     bulk descriptor; pages are decrypted in place
 * \param token    incoming token:
 *                 | krb5 header | head/tail cipher text | cksum |
 * \param adj_nob  nonzero to let krb5_decrypt_bulk adjust kiov lengths
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_DEFECTIVE_TOKEN /
 *         GSS_S_FAILURE / GSS_S_BAD_SIG otherwise.
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour is not supported for bulk privacy */
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* head/tail cipher text is decrypted in place inside the token */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         * (the clear pages are passed separately via GET_KIOV below)
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* compare the last ke_hash_size bytes against the token trailer */
        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1503
1504 int gss_display_kerberos(struct gss_ctx        *ctx,
1505                          char                  *buf,
1506                          int                    bufsize)
1507 {
1508         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1509         int                 written;
1510
1511         written = snprintf(buf, bufsize, "krb5 (%s)",
1512                            enctype2str(kctx->kc_enctype));
1513         return written;
1514 }
1515
/* Operation table of the krb5 mechanism, referenced by
 * gss_kerberos_mech below. */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1530
/* Subflavors exported by the krb5 mechanism, one per sptlrpc service
 * level: null, auth, integrity and privacy. */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1557
/* Mechanism descriptor registered with the lgss framework.
 * gm_oid is the DER-encoded Kerberos V5 mechanism OID
 * 1.2.840.113554.1.2.2 (RFC 1964). */
static struct gss_api_mech gss_kerberos_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "krb5",
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};
1567
1568 int __init init_kerberos_module(void)
1569 {
1570         int status;
1571
1572         spin_lock_init(&krb5_seq_lock);
1573
1574         status = lgss_mech_register(&gss_kerberos_mech);
1575         if (status)
1576                 CERROR("Failed to register kerberos gss mechanism!\n");
1577         return status;
1578 }
1579
/* Module cleanup: unregister the krb5 mechanism from the lgss
 * framework. */
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}
1583 }