Whamcloud - gitweb
LU-12816 ptlrpc: ptlrpc_register_bulk LBUG on ENOMEM
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, 2015, Intel Corporation.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/slab.h>
55 #include <linux/crypto.h>
56 #include <linux/mutex.h>
57
58 #include <obd.h>
59 #include <obd_class.h>
60 #include <obd_support.h>
61 #include <lustre_net.h>
62 #include <lustre_import.h>
63 #include <lustre_sec.h>
64
65 #include "gss_err.h"
66 #include "gss_internal.h"
67 #include "gss_api.h"
68 #include "gss_asn1.h"
69 #include "gss_krb5.h"
70 #include "gss_crypto.h"
71
/* Protects the global token sequence number used by fill_krb5_header().
 * Must be statically initialized: a bare "static spinlock_t" is never run
 * through spin_lock_init(), so locking it is undefined (and trips lockdep). */
static DEFINE_SPINLOCK(krb5_seq_lock);
73
/* Per-enctype parameters: Linux crypto API algorithm names plus the
 * checksum/confounder sizing used when building and parsing krb5 tokens. */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable name */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
83
/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but currently we simply use CBC with padding, because linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 *
 * The table is indexed directly by the krb5 ENCTYPE_* value; entries left
 * zero-filled (ke_hash_size == 0) are treated as unsupported by
 * krb5_init_keys().
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};
130
131 static const char * enctype2str(__u32 enctype)
132 {
133         if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
134                 return enctypes[enctype].ke_dispname;
135
136         return "unknown";
137 }
138
139 static
140 int krb5_init_keys(struct krb5_ctx *kctx)
141 {
142         struct krb5_enctype *ke;
143
144         if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
145             enctypes[kctx->kc_enctype].ke_hash_size == 0) {
146                 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
147                 return -1;
148         }
149
150         ke = &enctypes[kctx->kc_enctype];
151
152         /* tfm arc4 is stateful, user should alloc-use-free by his own */
153         if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
154             gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
155                 return -1;
156
157         /* tfm hmac is stateful, user should alloc-use-free by his own */
158         if (ke->ke_hash_hmac == 0 &&
159             gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
160                 return -1;
161         if (ke->ke_hash_hmac == 0 &&
162             gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
163                 return -1;
164
165         return 0;
166 }
167
168 static
169 void delete_context_kerberos(struct krb5_ctx *kctx)
170 {
171         rawobj_free(&kctx->kc_mech_used);
172
173         gss_keyblock_free(&kctx->kc_keye);
174         gss_keyblock_free(&kctx->kc_keyi);
175         gss_keyblock_free(&kctx->kc_keyc);
176 }
177
/*
 * Parse a version 0/1 (rfc1964-style) on-wire context blob in [p, end)
 * into @kctx.  All multi-byte fields are consumed sequentially via
 * gss_get_bytes()/gss_get_rawobj()/gss_get_keyblock(), each of which
 * advances @p and fails on short input.
 *
 * Returns 0 on success, GSS_S_FAILURE on any parse error.
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: must carry the same enctype and key size as the enc key */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: reuse the seq key as the integrity key */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* reject trailing garbage */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
256
257 /* Flags for version 2 context flags */
258 #define KRB5_CTX_FLAG_INITIATOR         0x00000001
259 #define KRB5_CTX_FLAG_CFX               0x00000002
260 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
261
/*
 * Parse a version 2 (rfc4121-style) on-wire context blob in [p, end)
 * into @kctx: end time, flags, send sequence, enctype, then exactly
 * three keyblocks (ke, ki, kc) of a common size.
 *
 * Returns 0 on success, GSS_S_FAILURE on any parse error.
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
324
325 /*
326  * The whole purpose here is trying to keep user level gss context parsing
327  * from nfs-utils unchanged as possible as we can, they are not quite mature
328  * yet, and many stuff still not clear, like heimdal etc.
329  */
/*
 * Import an on-wire security context: a version word followed by either
 * an rfc1964-style (version 0/1) or rfc4121-style (version 2) payload.
 * On success the parsed krb5 context is attached to @gctx.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* only versions 0, 1 (rfc1964) and 2 (rfc4121) are supported */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* in the old format the version word doubles as the
                 * initiator flag */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                /* safe even for a partially-imported context */
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
374
/*
 * Derive the reverse (opposite-direction) context from @gctx: the
 * initiator flag is inverted and the send/recv sequence numbers are
 * swapped; keys, mech oid, flags and expiry are duplicated.
 */
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        /* reverse direction: our send sequence is the peer's recv one */
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        /* set up tfms for the duplicated key material */
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
418
419 static
420 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
421                                    time64_t *endtime)
422 {
423         struct krb5_ctx *kctx = gctx->internal_ctx_id;
424
425         *endtime = kctx->kc_endtime;
426         return GSS_S_COMPLETE;
427 }
428
/* Destroy an imported context: free key material, then the context itself. */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}
437
/*
 * compute (keyed/keyless) checksum against the plain text which appended
 * with krb5 wire token header.
 *
 * \param enctype   index into enctypes[] selecting hash parameters
 * \param kb        key material; kb_tfm is also used for the keyless case
 * \param khdr      krb5 token header, included in the checksum
 * \param hash_func caller-supplied digest routine over hdr/msgs/iovs
 * \param cksum     output digest; cksum->data is allocated here and the
 *                  caller is responsible for freeing it (even on failure)
 *
 * Returns GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum,
                         digest_hash hash_func)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct ahash_request *req = NULL;
        enum cfs_crypto_hash_alg hash_algo;
        rawobj_t hdr;
        int rc;

        hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);

        /* For the cbc(des) case we want md5 instead of hmac(md5) */
        if (strcmp(ke->ke_enc_name, "cbc(des)"))
                req = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
                                           kb->kb_key.len);
        else
                req = cfs_crypto_hash_init(hash_algo, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("failed to alloc hash %s : rc = %d\n",
                       ke->ke_hash_name, rc);
                goto out_no_hash;
        }

        cksum->len = cfs_crypto_hash_digestsize(hash_algo);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                rc = -ENOMEM;
                goto out_free_hash;
        }

        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (!hash_func) {
                rc = -EPROTO;
                CERROR("hash function for %s undefined\n",
                       ke->ke_hash_name);
                goto out_free_hash;
        }
        rc = hash_func(req, &hdr, msgcnt, msgs, iovcnt, iovs);
        if (rc)
                goto out_free_hash;

        if (!ke->ke_hash_hmac) {
                /* keyless checksum (des-cbc-md5): finalize the digest,
                 * then encrypt it in place with the cipher tfm */
                LASSERT(kb->kb_tfm);

                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
                rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
                                       cksum->data, cksum->data,
                                       cksum->len);
                goto out_no_hash;
        }

out_free_hash:
        /* finalize even on error paths so the hash request is released */
        if (req)
                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
out_no_hash:
        return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
509
510 static void fill_krb5_header(struct krb5_ctx *kctx,
511                              struct krb5_header *khdr,
512                              int privacy)
513 {
514         unsigned char acceptor_flag;
515
516         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
517
518         if (privacy) {
519                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
520                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
521                 khdr->kh_ec = cpu_to_be16(0);
522                 khdr->kh_rrc = cpu_to_be16(0);
523         } else {
524                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
525                 khdr->kh_flags = acceptor_flag;
526                 khdr->kh_ec = cpu_to_be16(0xffff);
527                 khdr->kh_rrc = cpu_to_be16(0xffff);
528         }
529
530         khdr->kh_filler = 0xff;
531         spin_lock(&krb5_seq_lock);
532         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
533         spin_unlock(&krb5_seq_lock);
534 }
535
536 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
537                                 struct krb5_header *khdr,
538                                 int privacy)
539 {
540         unsigned char acceptor_flag;
541         __u16         tok_id, ec_rrc;
542
543         acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
544
545         if (privacy) {
546                 tok_id = KG_TOK_WRAP_MSG;
547                 ec_rrc = 0x0;
548         } else {
549                 tok_id = KG_TOK_MIC_MSG;
550                 ec_rrc = 0xffff;
551         }
552
553         /* sanity checks */
554         if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
555                 CERROR("bad token id\n");
556                 return GSS_S_DEFECTIVE_TOKEN;
557         }
558         if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
559                 CERROR("bad direction flag\n");
560                 return GSS_S_BAD_SIG;
561         }
562         if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
563                 CERROR("missing confidential flag\n");
564                 return GSS_S_BAD_SIG;
565         }
566         if (khdr->kh_filler != 0xff) {
567                 CERROR("bad filler\n");
568                 return GSS_S_DEFECTIVE_TOKEN;
569         }
570         if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
571             be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
572                 CERROR("bad EC or RRC\n");
573                 return GSS_S_DEFECTIVE_TOKEN;
574         }
575         return GSS_S_COMPLETE;
576 }
577
/*
 * Generate a MIC token into @token: the krb5 header followed by the
 * trailing ke_hash_size bytes of the checksum over msgs/iovs.
 * @token->len must already be large enough; it is trimmed to the
 * actual token size on success.
 */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum covers the header plus the caller's payload */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
                               msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* only the last ke_hash_size bytes of the digest go on the wire */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
614
/*
 * Verify a MIC token against msgs/iovs: check the header, recompute the
 * checksum and compare its trailing ke_hash_size bytes with the token.
 * Returns GSS_S_COMPLETE, GSS_S_DEFECTIVE_TOKEN, GSS_S_BAD_SIG or
 * GSS_S_FAILURE.
 */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                goto out;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                GOTO(out, major = GSS_S_FAILURE);
        }

        /* recompute the checksum with our copy of the key */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* compare against the digest bytes following the header */
        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
        }
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
out:
        return major;
}
665
/*
 * Encrypt a bulk descriptor in three stages, sharing one CBC IV chain:
 * the confounder, each data page (into the matching encrypted kiov),
 * and finally the krb5 header.
 *
 * @cipher receives [encrypted confounder | encrypted krb5 header], so
 * cipher->len must be blocksize + sizeof(*khdr).
 *
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                /* round each page length up to a whole number of cipher
                 * blocks */
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                /* record the rounded-up geometry in the encrypted kiov */
                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
770
771 /*
772  * desc->bd_nob_transferred is the size of cipher text received.
773  * desc->bd_nob is the target size of plain text supposed to be.
774  *
775  * if adj_nob != 0, we adjust each page's kiov_len to the actual
776  * plain text size.
777  * - for client read: we don't know data size for each page, so
778  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
779  *   be smaller, so we need to adjust it according to
780  *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
781  *   this means we DO NOT support the situation that server send an odd size
782  *   data in a page which is not the last one.
 * - for server write: we know exactly the data size expected for each page,
 *   thus kiov_len is accurate already, so we should not adjust it at all.
785  *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
786  *   round_up(bd_iov[]->kiov_len) which
787  *   should have been done by prep_bulk().
788  */
789 static
790 int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
791                       struct krb5_header *khdr,
792                       struct ptlrpc_bulk_desc *desc,
793                       rawobj_t *cipher,
794                       rawobj_t *plain,
795                       int adj_nob)
796 {
797         struct blkcipher_desc   ciph_desc;
798         __u8                    local_iv[16] = {0};
799         struct scatterlist      src, dst;
800         struct sg_table         sg_src, sg_dst;
801         int                     ct_nob = 0, pt_nob = 0;
802         int                     blocksize, i, rc;
803
804         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
805         LASSERT(desc->bd_iov_count);
806         LASSERT(GET_ENC_KIOV(desc));
807         LASSERT(desc->bd_nob_transferred);
808
809         blocksize = crypto_blkcipher_blocksize(tfm);
810         LASSERT(blocksize > 1);
811         LASSERT(cipher->len == blocksize + sizeof(*khdr));
812
813         ciph_desc.tfm  = tfm;
814         ciph_desc.info = local_iv;
815         ciph_desc.flags = 0;
816
817         if (desc->bd_nob_transferred % blocksize) {
818                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
819                 return -EPROTO;
820         }
821
822         /* decrypt head (confounder) */
823         rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
824         if (rc != 0)
825                 return rc;
826
827         rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
828         if (rc != 0) {
829                 gss_teardown_sgtable(&sg_src);
830                 return rc;
831         }
832
833         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
834                                          sg_src.sgl, blocksize);
835
836         gss_teardown_sgtable(&sg_dst);
837         gss_teardown_sgtable(&sg_src);
838
839         if (rc) {
840                 CERROR("error to decrypt confounder: %d\n", rc);
841                 return rc;
842         }
843
844         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
845              i++) {
846                 if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
847                     != 0 ||
848                     BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
849                     != 0) {
850                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
851                                i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
852                                BD_GET_ENC_KIOV(desc, i).kiov_len,
853                                blocksize);
854                         return -EFAULT;
855                 }
856
857                 if (adj_nob) {
858                         if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
859                             desc->bd_nob_transferred)
860                                 BD_GET_ENC_KIOV(desc, i).kiov_len =
861                                         desc->bd_nob_transferred - ct_nob;
862
863                         BD_GET_KIOV(desc, i).kiov_len =
864                           BD_GET_ENC_KIOV(desc, i).kiov_len;
865                         if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
866                             desc->bd_nob)
867                                 BD_GET_KIOV(desc, i).kiov_len =
868                                   desc->bd_nob - pt_nob;
869                 } else {
870                         /* this should be guaranteed by LNET */
871                         LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
872                                 kiov_len <=
873                                 desc->bd_nob_transferred);
874                         LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
875                                 BD_GET_ENC_KIOV(desc, i).kiov_len);
876                 }
877
878                 if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
879                         continue;
880
881                 sg_init_table(&src, 1);
882                 sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
883                             BD_GET_ENC_KIOV(desc, i).kiov_len,
884                             BD_GET_ENC_KIOV(desc, i).kiov_offset);
885                 dst = src;
886                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
887                         sg_assign_page(&dst,
888                                        BD_GET_KIOV(desc, i).kiov_page);
889
890                 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
891                                                  src.length);
892                 if (rc) {
893                         CERROR("error to decrypt page: %d\n", rc);
894                         return rc;
895                 }
896
897                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
898                         memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
899                                BD_GET_KIOV(desc, i).kiov_offset,
900                                page_address(BD_GET_ENC_KIOV(desc, i).
901                                             kiov_page) +
902                                BD_GET_KIOV(desc, i).kiov_offset,
903                                BD_GET_KIOV(desc, i).kiov_len);
904                 }
905
906                 ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
907                 pt_nob += BD_GET_KIOV(desc, i).kiov_len;
908         }
909
910         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
911                 CERROR("%d cipher text transferred but only %d decrypted\n",
912                        desc->bd_nob_transferred, ct_nob);
913                 return -EFAULT;
914         }
915
916         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
917                 CERROR("%d plain text expected but only %d received\n",
918                        desc->bd_nob, pt_nob);
919                 return -EFAULT;
920         }
921
922         /* if needed, clear up the rest unused iovs */
923         if (adj_nob)
924                 while (i < desc->bd_iov_count)
925                         BD_GET_KIOV(desc, i++).kiov_len = 0;
926
927         /* decrypt tail (krb5 header) */
928         rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
929                                sizeof(*khdr));
930         if (rc != 0)
931                 return rc;
932
933         rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
934                                sizeof(*khdr));
935         if (rc != 0) {
936                 gss_teardown_sgtable(&sg_src);
937                 return rc;
938         }
939
940         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
941                                          sizeof(*khdr));
942
943         gss_teardown_sgtable(&sg_src);
944         gss_teardown_sgtable(&sg_dst);
945
946         if (rc) {
947                 CERROR("error to decrypt tail: %d\n", rc);
948                 return rc;
949         }
950
951         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
952                 CERROR("krb5 header doesn't match\n");
953                 return -EACCES;
954         }
955
956         return 0;
957 }
958
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        u32 major;
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         * note the gss header is covered by the checksum above but is NOT
         * part of the encrypted payload.
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac derives a per-message RC4 key from the
                 * checksum, so a one-shot tfm is allocated and freed here. */
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
                                       gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                            arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum: only the trailing ke_hash_size bytes of the
         * (possibly longer) digest are transmitted */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1102
1103 static
1104 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1105                              struct ptlrpc_bulk_desc *desc)
1106 {
1107         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1108         int                  blocksize, i;
1109
1110         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1111         LASSERT(desc->bd_iov_count);
1112         LASSERT(GET_ENC_KIOV(desc));
1113         LASSERT(kctx->kc_keye.kb_tfm);
1114
1115         blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1116
1117         for (i = 0; i < desc->bd_iov_count; i++) {
1118                 LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
1119                 /*
1120                  * offset should always start at page boundary of either
1121                  * client or server side.
1122                  */
1123                 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
1124                         CERROR("odd offset %d in page %d\n",
1125                                BD_GET_KIOV(desc, i).kiov_offset, i);
1126                         return GSS_S_FAILURE;
1127                 }
1128
1129                 BD_GET_ENC_KIOV(desc, i).kiov_offset =
1130                         BD_GET_KIOV(desc, i).kiov_offset;
1131                 BD_GET_ENC_KIOV(desc, i).kiov_len =
1132                         (BD_GET_KIOV(desc, i).kiov_len +
1133                          blocksize - 1) & (~(blocksize - 1));
1134         }
1135
1136         return GSS_S_COMPLETE;
1137 }
1138
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int rc = 0;
        u32 major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         * the bulk pages themselves are encrypted in place by
         * krb5_encrypt_bulk(); only confounder + krb5 header travel
         * in the token.
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         * the clear pages are passed to krb5_make_checksum as the kiov
         * vector, so data_desc only needs the confounder.
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum, gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* head (confounder) + tail (krb5 header) cipher text lands right
         * after the cleartext krb5 header inside the token */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac is not supported for bulk i/o */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum: only the trailing ke_hash_size bytes of the
         * digest are transmitted */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1250
1251 static
1252 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1253                           rawobj_t        *gsshdr,
1254                           rawobj_t        *token,
1255                           rawobj_t        *msg)
1256 {
1257         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1258         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1259         struct krb5_header  *khdr;
1260         unsigned char       *tmpbuf;
1261         int                  blocksize, bodysize;
1262         rawobj_t             cksum = RAWOBJ_EMPTY;
1263         rawobj_t             cipher_in, plain_out;
1264         rawobj_t             hash_objs[3];
1265         int                  rc = 0;
1266         __u32                major;
1267         __u8                 local_iv[16] = {0};
1268
1269         LASSERT(ke);
1270
1271         if (token->len < sizeof(*khdr)) {
1272                 CERROR("short signature: %u\n", token->len);
1273                 return GSS_S_DEFECTIVE_TOKEN;
1274         }
1275
1276         khdr = (struct krb5_header *)token->data;
1277
1278         major = verify_krb5_header(kctx, khdr, 1);
1279         if (major != GSS_S_COMPLETE) {
1280                 CERROR("bad krb5 header\n");
1281                 return major;
1282         }
1283
1284         /* block size */
1285         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1286                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1287                 blocksize = 1;
1288         } else {
1289                 LASSERT(kctx->kc_keye.kb_tfm);
1290                 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1291         }
1292
1293         /* expected token layout:
1294          * ----------------------------------------
1295          * | krb5 header | cipher text | checksum |
1296          * ----------------------------------------
1297          */
1298         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1299
1300         if (bodysize % blocksize) {
1301                 CERROR("odd bodysize %d\n", bodysize);
1302                 return GSS_S_DEFECTIVE_TOKEN;
1303         }
1304
1305         if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1306                 CERROR("incomplete token: bodysize %d\n", bodysize);
1307                 return GSS_S_DEFECTIVE_TOKEN;
1308         }
1309
1310         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1311                 CERROR("buffer too small: %u, require %d\n",
1312                        msg->len, bodysize - ke->ke_conf_size);
1313                 return GSS_S_FAILURE;
1314         }
1315
1316         /* decrypting */
1317         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1318         if (!tmpbuf)
1319                 return GSS_S_FAILURE;
1320
1321         major = GSS_S_FAILURE;
1322
1323         cipher_in.data = (__u8 *)(khdr + 1);
1324         cipher_in.len = bodysize;
1325         plain_out.data = tmpbuf;
1326         plain_out.len = bodysize;
1327
1328         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1329                 rawobj_t                 arc4_keye;
1330                 struct crypto_blkcipher *arc4_tfm;
1331
1332                 cksum.data = token->data + token->len - ke->ke_hash_size;
1333                 cksum.len = ke->ke_hash_size;
1334
1335                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1336                                        NULL, 1, &cksum, 0, NULL, &arc4_keye,
1337                                        gctx->hash_func)) {
1338                         CERROR("failed to obtain arc4 enc key\n");
1339                         GOTO(arc4_out, rc = -EACCES);
1340                 }
1341
1342                 arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1343                 if (IS_ERR(arc4_tfm)) {
1344                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1345                         GOTO(arc4_out_key, rc = -EACCES);
1346                 }
1347
1348                 if (crypto_blkcipher_setkey(arc4_tfm,
1349                                             arc4_keye.data, arc4_keye.len)) {
1350                         CERROR("failed to set arc4 key, len %d\n",
1351                                arc4_keye.len);
1352                         GOTO(arc4_out_tfm, rc = -EACCES);
1353                 }
1354
1355                 rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
1356                                        &plain_out, 0);
1357 arc4_out_tfm:
1358                 crypto_free_blkcipher(arc4_tfm);
1359 arc4_out_key:
1360                 rawobj_free(&arc4_keye);
1361 arc4_out:
1362                 cksum = RAWOBJ_EMPTY;
1363         } else {
1364                 rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
1365                                        &cipher_in, &plain_out, 0);
1366         }
1367
1368         if (rc != 0) {
1369                 CERROR("error decrypt\n");
1370                 goto out_free;
1371         }
1372         LASSERT(plain_out.len == bodysize);
1373
1374         /* expected clear text layout:
1375          * -----------------------------------------
1376          * | confounder | clear msgs | krb5 header |
1377          * -----------------------------------------
1378          */
1379
1380         /* verify krb5 header in token is not modified */
1381         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1382                    sizeof(*khdr))) {
1383                 CERROR("decrypted krb5 header mismatch\n");
1384                 goto out_free;
1385         }
1386
1387         /* verify checksum, compose clear text as layout:
1388          * ------------------------------------------------------
1389          * | confounder | gss header | clear msgs | krb5 header |
1390          * ------------------------------------------------------
1391          */
1392         hash_objs[0].len = ke->ke_conf_size;
1393         hash_objs[0].data = plain_out.data;
1394         hash_objs[1].len = gsshdr->len;
1395         hash_objs[1].data = gsshdr->data;
1396         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1397         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1398         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1399                                khdr, 3, hash_objs, 0, NULL, &cksum,
1400                                gctx->hash_func))
1401                 goto out_free;
1402
1403         LASSERT(cksum.len >= ke->ke_hash_size);
1404         if (memcmp((char *)(khdr + 1) + bodysize,
1405                    cksum.data + cksum.len - ke->ke_hash_size,
1406                    ke->ke_hash_size)) {
1407                 CERROR("checksum mismatch\n");
1408                 goto out_free;
1409         }
1410
1411         msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
1412         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1413
1414         major = GSS_S_COMPLETE;
1415 out_free:
1416         OBD_FREE_LARGE(tmpbuf, bodysize);
1417         rawobj_free(&cksum);
1418         return major;
1419 }
1420
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac is not supported for bulk i/o */
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* head/tail cipher text is decrypted in place inside the token */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        /* decrypts the bulk pages in place as well as head/tail; adj_nob
         * tells it to trim kiov lengths to the actual transferred size */
        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         * the decrypted pages are passed via the kiov vector, so data_desc
         * only needs the decrypted confounder (first blocksize bytes).
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum, gctx->hash_func))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* the received checksum sits right after the head/tail cipher
         * text in the token */
        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1513
1514 int gss_display_kerberos(struct gss_ctx        *ctx,
1515                          char                  *buf,
1516                          int                    bufsize)
1517 {
1518         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1519         int                 written;
1520
1521         written = snprintf(buf, bufsize, "krb5 (%s)",
1522                            enctype2str(kctx->kc_enctype));
1523         return written;
1524 }
1525
/* operation vector tying the generic gss layer to the krb5 mechanism */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1540
/* sptlrpc subflavors offered by krb5: null / auth (integrity on header
 * only) / integrity / privacy service levels */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1567
/* mechanism descriptor; gm_oid is the standard krb5 GSS-API mechanism
 * OID 1.2.840.113554.1.2.2 in DER (octal) encoding */
static struct gss_api_mech gss_kerberos_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "krb5",
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};
1577
1578 int __init init_kerberos_module(void)
1579 {
1580         int status;
1581
1582         spin_lock_init(&krb5_seq_lock);
1583
1584         status = lgss_mech_register(&gss_kerberos_mech);
1585         if (status)
1586                 CERROR("Failed to register kerberos gss mechanism!\n");
1587         return status;
1588 }
1589
/* Module exit: unregister the krb5 mechanism from the gss framework. */
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}