Whamcloud - gitweb
LU-6142 lustre: convert snprintf to scnprintf as appropriate
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, 2015, Intel Corporation.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/slab.h>
56 #include <linux/crypto.h>
57 #include <linux/mutex.h>
58
59 #include <obd.h>
60 #include <obd_class.h>
61 #include <obd_support.h>
62 #include <lustre_net.h>
63 #include <lustre_import.h>
64 #include <lustre_sec.h>
65
66 #include "gss_err.h"
67 #include "gss_internal.h"
68 #include "gss_api.h"
69 #include "gss_asn1.h"
70 #include "gss_krb5.h"
71 #include "gss_crypto.h"
72
73 static DEFINE_SPINLOCK(krb5_seq_lock);
74
/*
 * Per-enctype parameters: how one Kerberos 5 encryption type maps onto
 * the Linux crypto API (which cipher/hash tfm names to use) plus the
 * on-the-wire checksum and confounder sizes.
 */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable name, see enctype2str() */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
84
/*
 * Supported enctypes, indexed by the krb5 ENCTYPE_* value.  A hole in
 * the array (ke_hash_size == 0) is treated as unsupported, see
 * krb5_init_keys().
 *
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but currently we simply do CBC with padding, because Linux
 * doesn't support CTS yet.  This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
#ifdef HAVE_DES3_SUPPORT
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
#endif
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,           /* sha1-96: digest truncated to 96 bits */
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};
133
134 static const char * enctype2str(__u32 enctype)
135 {
136         if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
137                 return enctypes[enctype].ke_dispname;
138
139         return "unknown";
140 }
141
142 static
143 int krb5_init_keys(struct krb5_ctx *kctx)
144 {
145         struct krb5_enctype *ke;
146
147         if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
148             enctypes[kctx->kc_enctype].ke_hash_size == 0) {
149                 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
150                 return -1;
151         }
152
153         ke = &enctypes[kctx->kc_enctype];
154
155         /* tfm arc4 is stateful, user should alloc-use-free by his own */
156         if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
157             gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
158                 return -1;
159
160         /* tfm hmac is stateful, user should alloc-use-free by his own */
161         if (ke->ke_hash_hmac == 0 &&
162             gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
163                 return -1;
164         if (ke->ke_hash_hmac == 0 &&
165             gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
166                 return -1;
167
168         return 0;
169 }
170
171 static
172 void delete_context_kerberos(struct krb5_ctx *kctx)
173 {
174         rawobj_free(&kctx->kc_mech_used);
175
176         gss_keyblock_free(&kctx->kc_keye);
177         gss_keyblock_free(&kctx->kc_keyi);
178         gss_keyblock_free(&kctx->kc_keyc);
179 }
180
/*
 * Parse a version 0/1 (rfc1964 style) context blob from user space.
 *
 * @kctx: context to fill; keys are stored as raw keyblocks, the tfms
 *        are set up later by krb5_init_keys()
 * @p:    start of the remaining blob (version word already consumed)
 * @end:  one past the last byte of the blob
 *
 * Returns 0 on success, GSS_S_FAILURE on any parse error; the caller
 * then releases partial allocations via delete_context_kerberos().
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now; consumed and discarded */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: must repeat the same enctype and keysize as the enc key */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: integrity key duplicates the checksum key */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* the blob must have been consumed exactly */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
259
260 /* Flags for version 2 context flags */
261 #define KRB5_CTX_FLAG_INITIATOR         0x00000001
262 #define KRB5_CTX_FLAG_CFX               0x00000002
263 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
264
/*
 * Parse a version 2 (rfc4121 style) context blob from user space.
 * Layout: end time, flags, sequence number, enctype, per-key size,
 * key count (must be 3), then the ke/ki/kc keyblocks in order.
 *
 * Returns 0 on success, GSS_S_FAILURE on any parse error; the caller
 * then releases partial allocations via delete_context_kerberos().
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
327
/*
 * The whole purpose here is trying to keep user level gss context parsing
 * from nfs-utils unchanged as possible as we can, they are not quite mature
 * yet, and many stuff still not clear, like heimdal etc.
 *
 * Import a kerberos security context handed down from user space:
 * read the version word, dispatch to the matching blob parser, then
 * set up the crypto tfms.  On success the new krb5_ctx is attached
 * to @gctx->internal_ctx_id.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* versions 0 and 1 are rfc1964 style blobs, version 2 is rfc4121 */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* for old blobs the version word doubles as initiator flag */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
377
/*
 * Build a reverse-direction context from an existing one: same keys,
 * enctype and expiry, but with the initiator role inverted and the
 * send/recv sequence counters swapped.
 */
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        /* the reverse context plays the opposite role */
        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        /* swap the sequence counters for the reverse direction */
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        /* set up the crypto tfms for the duplicated raw keys */
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
421
422 static
423 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
424                                    time64_t *endtime)
425 {
426         struct krb5_ctx *kctx = gctx->internal_ctx_id;
427
428         *endtime = kctx->kc_endtime;
429         return GSS_S_COMPLETE;
430 }
431
/* gss_api hook: tear down a kerberos context and free its memory. */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *ctx = internal_ctx;

        /* drop keys and mech oid first, then the context itself */
        delete_context_kerberos(ctx);
        OBD_FREE_PTR(ctx);
}
440
/*
 * compute (keyed/keyless) checksum against the plain text which appended
 * with krb5 wire token header.
 *
 * The digest covers @khdr followed by @msgs and @iovs (see hash_func).
 * On success *cksum holds the digest, allocated here; the caller
 * releases it with rawobj_free().  Returns GSS_S_COMPLETE or
 * GSS_S_FAILURE.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, struct bio_vec *iovs,
                         rawobj_t *cksum,
                         digest_hash hash_func)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct ahash_request *req = NULL;
        enum cfs_crypto_hash_alg hash_algo;
        rawobj_t hdr;
        int rc;

        hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);

        /* For the cbc(des) case we want md5 instead of hmac(md5):
         * keyless digest for des-cbc-md5, keyed for everything else */
        if (strcmp(ke->ke_enc_name, "cbc(des)"))
                req = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
                                           kb->kb_key.len);
        else
                req = cfs_crypto_hash_init(hash_algo, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("failed to alloc hash %s : rc = %d\n",
                       ke->ke_hash_name, rc);
                goto out_no_hash;
        }

        cksum->len = cfs_crypto_hash_digestsize(hash_algo);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                rc = -ENOMEM;
                goto out_free_hash;
        }

        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (!hash_func) {
                rc = -EPROTO;
                CERROR("hash function for %s undefined\n",
                       ke->ke_hash_name);
                goto out_free_hash;
        }
        rc = hash_func(req, &hdr, msgcnt, msgs, iovcnt, iovs);
        if (rc)
                goto out_free_hash;

        if (!ke->ke_hash_hmac) {
                /* keyless digest: finalize, then encrypt the digest in
                 * place with the key (old des-cbc-md5 style checksum) */
                LASSERT(kb->kb_tfm);

                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
                rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
                                       cksum->data, cksum->data,
                                       cksum->len);
                goto out_no_hash;
        }

out_free_hash:
        /* NOTE(review): on the -ENOMEM path cksum->data is NULL here;
         * presumably cfs_crypto_hash_final then only releases @req —
         * confirm against libcfs */
        if (req)
                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
out_no_hash:
        return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
512
513 static void fill_krb5_header(struct krb5_ctx *kctx,
514                              struct krb5_header *khdr,
515                              int privacy)
516 {
517         unsigned char acceptor_flag;
518
519         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
520
521         if (privacy) {
522                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
523                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
524                 khdr->kh_ec = cpu_to_be16(0);
525                 khdr->kh_rrc = cpu_to_be16(0);
526         } else {
527                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
528                 khdr->kh_flags = acceptor_flag;
529                 khdr->kh_ec = cpu_to_be16(0xffff);
530                 khdr->kh_rrc = cpu_to_be16(0xffff);
531         }
532
533         khdr->kh_filler = 0xff;
534         spin_lock(&krb5_seq_lock);
535         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
536         spin_unlock(&krb5_seq_lock);
537 }
538
539 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
540                                 struct krb5_header *khdr,
541                                 int privacy)
542 {
543         unsigned char acceptor_flag;
544         __u16         tok_id, ec_rrc;
545
546         acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
547
548         if (privacy) {
549                 tok_id = KG_TOK_WRAP_MSG;
550                 ec_rrc = 0x0;
551         } else {
552                 tok_id = KG_TOK_MIC_MSG;
553                 ec_rrc = 0xffff;
554         }
555
556         /* sanity checks */
557         if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
558                 CERROR("bad token id\n");
559                 return GSS_S_DEFECTIVE_TOKEN;
560         }
561         if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
562                 CERROR("bad direction flag\n");
563                 return GSS_S_BAD_SIG;
564         }
565         if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
566                 CERROR("missing confidential flag\n");
567                 return GSS_S_BAD_SIG;
568         }
569         if (khdr->kh_filler != 0xff) {
570                 CERROR("bad filler\n");
571                 return GSS_S_DEFECTIVE_TOKEN;
572         }
573         if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
574             be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
575                 CERROR("bad EC or RRC\n");
576                 return GSS_S_DEFECTIVE_TOKEN;
577         }
578         return GSS_S_COMPLETE;
579 }
580
/*
 * gss_api hook: compute a MIC token over @msgs / @iovs.
 *
 * The token is the krb5 header followed by the trailing ke_hash_size
 * bytes of the checksum.  token->len must be large enough on entry
 * (asserted) and is set to the actual token size on success.
 */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           struct bio_vec *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        /* fill krb5 header at the front of the token buffer */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum covers the header plus the message payload */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
                               msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* only the trailing ke_hash_size bytes of the digest go on the wire */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
617
/*
 * gss_api hook: verify a MIC token produced by gss_get_mic_kerberos().
 * Validates the krb5 header, recomputes the checksum over @msgs / @iovs
 * and compares it with the digest carried after the header in @token.
 */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              struct bio_vec *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                goto out;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                GOTO(out, major = GSS_S_FAILURE);
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* only the trailing ke_hash_size bytes of the digest are on the
         * wire.  NOTE(review): memcmp is not constant-time — consider
         * whether a timing-safe compare is warranted here */
        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
        }
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
out:
        return major;
}
668
/*
 * Encrypt a bulk descriptor for privacy mode, in three chained steps:
 *   1. encrypt the confounder into cipher->data[0 .. blocksize)
 *   2. encrypt each data page of @desc into the matching bd_enc_vec page
 *   3. encrypt the krb5 header into cipher->data[blocksize ..)
 * All steps reuse the same local_iv, so the CBC chaining spans the
 * whole sequence.
 *
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 */
static
int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        __u8 local_iv[16] = {0};
        struct scatterlist src, dst;
        struct sg_table sg_src, sg_dst;
        int blocksize, i, rc, nob = 0;
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_vec);

        blocksize = crypto_sync_skcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }
        skcipher_request_set_sync_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
                                   blocksize, local_iv);

        rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                skcipher_request_zero(req);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                /* round each page length up to a whole number of cipher
                 * blocks (CBC with padding, see note above enctypes[]) */
                sg_init_table(&src, 1);
                sg_set_page(&src, desc->bd_vec[i].bv_page,
                            (desc->bd_vec[i].bv_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            desc->bd_vec[i].bv_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, desc->bd_enc_vec[i].bv_page,
                            src.length, src.offset);

                /* publish the (possibly rounded-up) geometry of the
                 * encrypted page back into the descriptor */
                desc->bd_enc_vec[i].bv_offset = dst.offset;
                desc->bd_enc_vec[i].bv_len = dst.length;

                skcipher_request_set_crypt(req, &src, &dst,
                                          src.length, local_iv);
                rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        skcipher_request_zero(req);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0) {
                skcipher_request_zero(req);
                return rc;
        }

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                skcipher_request_zero(req);
                return rc;
        }

        skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
                                   sizeof(*khdr), local_iv);
        rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl,
                                        sizeof(*khdr));
        skcipher_request_zero(req);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
780
781 /*
782  * desc->bd_nob_transferred is the size of cipher text received.
783  * desc->bd_nob is the target size of plain text supposed to be.
784  *
785  * if adj_nob != 0, we adjust each page's bv_len to the actual
786  * plain text size.
787  * - for client read: we don't know data size for each page, so
788  *   bd_iov[]->bv_len is set to PAGE_SIZE, but actual data received might
789  *   be smaller, so we need to adjust it according to
790  *   bd_u.bd_kiov.bd_enc_vec[]->bv_len.
791  *   this means we DO NOT support the situation that server send an odd size
792  *   data in a page which is not the last one.
793  * - for server write: we knows exactly data size for each page being expected,
794  *   thus bv_len is accurate already, so we should not adjust it at all.
795  *   and bd_u.bd_kiov.bd_enc_vec[]->bv_len should be
796  *   round_up(bd_iov[]->bv_len) which
797  *   should have been done by prep_bulk().
798  */
799 static
800 int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm,
801                       struct krb5_header *khdr,
802                       struct ptlrpc_bulk_desc *desc,
803                       rawobj_t *cipher,
804                       rawobj_t *plain,
805                       int adj_nob)
806 {
807         __u8 local_iv[16] = {0};
808         struct scatterlist src, dst;
809         struct sg_table sg_src, sg_dst;
810         int ct_nob = 0, pt_nob = 0;
811         int blocksize, i, rc;
812         SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
813
814         LASSERT(desc->bd_iov_count);
815         LASSERT(desc->bd_enc_vec);
816         LASSERT(desc->bd_nob_transferred);
817
818         blocksize = crypto_sync_skcipher_blocksize(tfm);
819         LASSERT(blocksize > 1);
820         LASSERT(cipher->len == blocksize + sizeof(*khdr));
821
822         if (desc->bd_nob_transferred % blocksize) {
823                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
824                 return -EPROTO;
825         }
826
827         /* decrypt head (confounder) */
828         rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
829         if (rc != 0)
830                 return rc;
831
832         rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
833         if (rc != 0) {
834                 gss_teardown_sgtable(&sg_src);
835                 return rc;
836         }
837
838         skcipher_request_set_sync_tfm(req, tfm);
839         skcipher_request_set_callback(req, 0, NULL, NULL);
840         skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
841                                    blocksize, local_iv);
842
843         rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
844
845         gss_teardown_sgtable(&sg_dst);
846         gss_teardown_sgtable(&sg_src);
847
848         if (rc) {
849                 CERROR("error to decrypt confounder: %d\n", rc);
850                 skcipher_request_zero(req);
851                 return rc;
852         }
853
854         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
855              i++) {
856                 if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 ||
857                     desc->bd_enc_vec[i].bv_len % blocksize != 0) {
858                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
859                                i, desc->bd_enc_vec[i].bv_offset,
860                                desc->bd_enc_vec[i].bv_len,
861                                blocksize);
862                         skcipher_request_zero(req);
863                         return -EFAULT;
864                 }
865
866                 if (adj_nob) {
867                         if (ct_nob + desc->bd_enc_vec[i].bv_len >
868                             desc->bd_nob_transferred)
869                                 desc->bd_enc_vec[i].bv_len =
870                                         desc->bd_nob_transferred - ct_nob;
871
872                         desc->bd_vec[i].bv_len =
873                           desc->bd_enc_vec[i].bv_len;
874                         if (pt_nob + desc->bd_enc_vec[i].bv_len >
875                             desc->bd_nob)
876                                 desc->bd_vec[i].bv_len =
877                                   desc->bd_nob - pt_nob;
878                 } else {
879                         /* this should be guaranteed by LNET */
880                         LASSERT(ct_nob + desc->bd_enc_vec[i].
881                                 bv_len <=
882                                 desc->bd_nob_transferred);
883                         LASSERT(desc->bd_vec[i].bv_len <=
884                                 desc->bd_enc_vec[i].bv_len);
885                 }
886
887                 if (desc->bd_enc_vec[i].bv_len == 0)
888                         continue;
889
890                 sg_init_table(&src, 1);
891                 sg_set_page(&src, desc->bd_enc_vec[i].bv_page,
892                             desc->bd_enc_vec[i].bv_len,
893                             desc->bd_enc_vec[i].bv_offset);
894                 dst = src;
895                 if (desc->bd_vec[i].bv_len % blocksize == 0)
896                         sg_assign_page(&dst,
897                                        desc->bd_vec[i].bv_page);
898
899                 skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
900                                            src.length, local_iv);
901                 rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length);
902                 if (rc) {
903                         CERROR("error to decrypt page: %d\n", rc);
904                         skcipher_request_zero(req);
905                         return rc;
906                 }
907
908                 if (desc->bd_vec[i].bv_len % blocksize != 0) {
909                         memcpy(page_address(desc->bd_vec[i].bv_page) +
910                                desc->bd_vec[i].bv_offset,
911                                page_address(desc->bd_enc_vec[i].
912                                             bv_page) +
913                                desc->bd_vec[i].bv_offset,
914                                desc->bd_vec[i].bv_len);
915                 }
916
917                 ct_nob += desc->bd_enc_vec[i].bv_len;
918                 pt_nob += desc->bd_vec[i].bv_len;
919         }
920
921         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
922                 CERROR("%d cipher text transferred but only %d decrypted\n",
923                        desc->bd_nob_transferred, ct_nob);
924                 skcipher_request_zero(req);
925                 return -EFAULT;
926         }
927
928         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
929                 CERROR("%d plain text expected but only %d received\n",
930                        desc->bd_nob, pt_nob);
931                 skcipher_request_zero(req);
932                 return -EFAULT;
933         }
934
935         /* if needed, clear up the rest unused iovs */
936         if (adj_nob)
937                 while (i < desc->bd_iov_count)
938                         desc->bd_vec[i++].bv_len = 0;
939
940         /* decrypt tail (krb5 header) */
941         rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
942                                sizeof(*khdr));
943         if (rc != 0)
944                 return rc;
945
946         rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
947                                sizeof(*khdr));
948         if (rc != 0) {
949                 gss_teardown_sgtable(&sg_src);
950                 return rc;
951         }
952
953         skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
954                                   src.length, local_iv);
955         rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl,
956                                         sizeof(*khdr));
957         gss_teardown_sgtable(&sg_src);
958         gss_teardown_sgtable(&sg_dst);
959
960         skcipher_request_zero(req);
961         if (rc) {
962                 CERROR("error to decrypt tail: %d\n", rc);
963                 return rc;
964         }
965
966         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
967                 CERROR("krb5 header doesn't match\n");
968                 return -EACCES;
969         }
970
971         return 0;
972 }
973
/*
 * Wrap a clear message into a krb5 privacy (krb5p) token: compute a keyed
 * checksum over confounder + gss header + message, encrypt confounder +
 * message + krb5 header in place into the token, then append the checksum.
 * On success token->len is set to the final token size.
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        u32 major;
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_sync_skcipher_blocksize(
                                                        kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac derives a per-message encryption key from
                 * the checksum, then encrypts with a one-off arc4 tfm */
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
                struct crypto_sync_skcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
                                       gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_sync_skcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum: only the last ke_hash_size bytes are kept */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1118
1119 static
1120 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1121                              struct ptlrpc_bulk_desc *desc)
1122 {
1123         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1124         int                  blocksize, i;
1125
1126         LASSERT(desc->bd_iov_count);
1127         LASSERT(desc->bd_enc_vec);
1128         LASSERT(kctx->kc_keye.kb_tfm);
1129
1130         blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
1131
1132         for (i = 0; i < desc->bd_iov_count; i++) {
1133                 LASSERT(desc->bd_enc_vec[i].bv_page);
1134                 /*
1135                  * offset should always start at page boundary of either
1136                  * client or server side.
1137                  */
1138                 if (desc->bd_vec[i].bv_offset & blocksize) {
1139                         CERROR("odd offset %d in page %d\n",
1140                                desc->bd_vec[i].bv_offset, i);
1141                         return GSS_S_FAILURE;
1142                 }
1143
1144                 desc->bd_enc_vec[i].bv_offset =
1145                         desc->bd_vec[i].bv_offset;
1146                 desc->bd_enc_vec[i].bv_len =
1147                         (desc->bd_vec[i].bv_len +
1148                          blocksize - 1) & (~(blocksize - 1));
1149         }
1150
1151         return GSS_S_COMPLETE;
1152 }
1153
/*
 * Wrap a bulk transfer for the privacy service: checksum covers the
 * confounder, the clear data pages and the krb5 header; the data pages are
 * encrypted in place into bd_enc_vec[] by krb5_encrypt_bulk(), while the
 * token itself only carries the encrypted confounder + krb5 header head/tail
 * plus the checksum.
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int rc = 0;
        u32 major;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksz <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum; the clear pages are passed via the iov args */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_vec,
                               &cksum, gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* token cipher part only carries confounder + krb5 header;
         * the data pages are encrypted into desc->bd_enc_vec[] */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksz + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac is never used for bulk i/o */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum: only the last ke_hash_size bytes are kept */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1264
1265 static
1266 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1267                           rawobj_t        *gsshdr,
1268                           rawobj_t        *token,
1269                           rawobj_t        *msg)
1270 {
1271         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1272         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1273         struct krb5_header  *khdr;
1274         unsigned char       *tmpbuf;
1275         int                  blocksz, bodysize;
1276         rawobj_t             cksum = RAWOBJ_EMPTY;
1277         rawobj_t             cipher_in, plain_out;
1278         rawobj_t             hash_objs[3];
1279         int                  rc = 0;
1280         __u32                major;
1281         __u8                 local_iv[16] = {0};
1282
1283         LASSERT(ke);
1284
1285         if (token->len < sizeof(*khdr)) {
1286                 CERROR("short signature: %u\n", token->len);
1287                 return GSS_S_DEFECTIVE_TOKEN;
1288         }
1289
1290         khdr = (struct krb5_header *)token->data;
1291
1292         major = verify_krb5_header(kctx, khdr, 1);
1293         if (major != GSS_S_COMPLETE) {
1294                 CERROR("bad krb5 header\n");
1295                 return major;
1296         }
1297
1298         /* block size */
1299         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1300                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1301                 blocksz = 1;
1302         } else {
1303                 LASSERT(kctx->kc_keye.kb_tfm);
1304                 blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
1305         }
1306
1307         /* expected token layout:
1308          * ----------------------------------------
1309          * | krb5 header | cipher text | checksum |
1310          * ----------------------------------------
1311          */
1312         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1313
1314         if (bodysize % blocksz) {
1315                 CERROR("odd bodysize %d\n", bodysize);
1316                 return GSS_S_DEFECTIVE_TOKEN;
1317         }
1318
1319         if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1320                 CERROR("incomplete token: bodysize %d\n", bodysize);
1321                 return GSS_S_DEFECTIVE_TOKEN;
1322         }
1323
1324         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1325                 CERROR("buffer too small: %u, require %d\n",
1326                        msg->len, bodysize - ke->ke_conf_size);
1327                 return GSS_S_FAILURE;
1328         }
1329
1330         /* decrypting */
1331         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1332         if (!tmpbuf)
1333                 return GSS_S_FAILURE;
1334
1335         major = GSS_S_FAILURE;
1336
1337         cipher_in.data = (__u8 *)(khdr + 1);
1338         cipher_in.len = bodysize;
1339         plain_out.data = tmpbuf;
1340         plain_out.len = bodysize;
1341
1342         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1343                 rawobj_t                 arc4_keye;
1344                 struct crypto_sync_skcipher *arc4_tfm;
1345
1346                 cksum.data = token->data + token->len - ke->ke_hash_size;
1347                 cksum.len = ke->ke_hash_size;
1348
1349                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1350                                        NULL, 1, &cksum, 0, NULL, &arc4_keye,
1351                                        gctx->hash_func)) {
1352                         CERROR("failed to obtain arc4 enc key\n");
1353                         GOTO(arc4_out, rc = -EACCES);
1354                 }
1355
1356                 arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
1357                 if (IS_ERR(arc4_tfm)) {
1358                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1359                         GOTO(arc4_out_key, rc = -EACCES);
1360                 }
1361
1362                 if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
1363                                                 arc4_keye.len)) {
1364                         CERROR("failed to set arc4 key, len %d\n",
1365                                arc4_keye.len);
1366                         GOTO(arc4_out_tfm, rc = -EACCES);
1367                 }
1368
1369                 rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
1370                                        &plain_out, 0);
1371 arc4_out_tfm:
1372                 crypto_free_sync_skcipher(arc4_tfm);
1373 arc4_out_key:
1374                 rawobj_free(&arc4_keye);
1375 arc4_out:
1376                 cksum = RAWOBJ_EMPTY;
1377         } else {
1378                 rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
1379                                        &cipher_in, &plain_out, 0);
1380         }
1381
1382         if (rc != 0) {
1383                 CERROR("error decrypt\n");
1384                 goto out_free;
1385         }
1386         LASSERT(plain_out.len == bodysize);
1387
1388         /* expected clear text layout:
1389          * -----------------------------------------
1390          * | confounder | clear msgs | krb5 header |
1391          * -----------------------------------------
1392          */
1393
1394         /* verify krb5 header in token is not modified */
1395         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1396                    sizeof(*khdr))) {
1397                 CERROR("decrypted krb5 header mismatch\n");
1398                 goto out_free;
1399         }
1400
1401         /* verify checksum, compose clear text as layout:
1402          * ------------------------------------------------------
1403          * | confounder | gss header | clear msgs | krb5 header |
1404          * ------------------------------------------------------
1405          */
1406         hash_objs[0].len = ke->ke_conf_size;
1407         hash_objs[0].data = plain_out.data;
1408         hash_objs[1].len = gsshdr->len;
1409         hash_objs[1].data = gsshdr->data;
1410         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1411         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1412         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1413                                khdr, 3, hash_objs, 0, NULL, &cksum,
1414                                gctx->hash_func))
1415                 goto out_free;
1416
1417         LASSERT(cksum.len >= ke->ke_hash_size);
1418         if (memcmp((char *)(khdr + 1) + bodysize,
1419                    cksum.data + cksum.len - ke->ke_hash_size,
1420                    ke->ke_hash_size)) {
1421                 CERROR("checksum mismatch\n");
1422                 goto out_free;
1423         }
1424
1425         msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
1426         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1427
1428         major = GSS_S_COMPLETE;
1429 out_free:
1430         OBD_FREE_LARGE(tmpbuf, bodysize);
1431         rawobj_free(&cksum);
1432         return major;
1433 }
1434
/*
 * Unwrap a privacy-protected bulk transfer: decrypt the token's head/tail
 * cipher (confounder + krb5 header) and the data pages via
 * krb5_decrypt_bulk(), then verify the keyed checksum over confounder,
 * clear pages and krb5 header.
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size; arcfour-hmac is never used for bulk i/o */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksz = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* decrypt the head/tail cipher in place in the token */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksz + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksz;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               desc->bd_vec,
                               &cksum, gctx->hash_func))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* compare against the last ke_hash_size bytes of the token */
        if (memcmp(plain.data + blocksz + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1526
1527 int gss_display_kerberos(struct gss_ctx        *ctx,
1528                          char                  *buf,
1529                          int                    bufsize)
1530 {
1531         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1532         int                 written;
1533
1534         written = scnprintf(buf, bufsize, "krb5 (%s)",
1535                             enctype2str(kctx->kc_enctype));
1536         return written;
1537 }
1538
/* operations vector implementing the GSS mechanism interface for krb5 */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1553
/* sptlrpc subflavors offered by krb5, one per protection service level:
 * null, auth (integrity on header only), intg (full integrity), priv */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1580
/* mechanism descriptor registered with the GSS framework; gm_oid is the
 * standard krb5 mechanism OID 1.2.840.113554.1.2.2 in DER encoding */
static struct gss_api_mech gss_kerberos_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "krb5",
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};
1590
1591 int __init init_kerberos_module(void)
1592 {
1593         int status;
1594
1595         status = lgss_mech_register(&gss_kerberos_mech);
1596         if (status)
1597                 CERROR("Failed to register kerberos gss mechanism!\n");
1598         return status;
1599 }
1600
/* Unregister the krb5 mechanism at module unload. */
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}