Whamcloud - gitweb
39ef0c20775b12dd5e9e9cf9706064ca181fab55
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, 2015, Intel Corporation.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/slab.h>
56 #include <linux/crypto.h>
57 #include <linux/mutex.h>
58
59 #include <obd.h>
60 #include <obd_class.h>
61 #include <obd_support.h>
62 #include <lustre_net.h>
63 #include <lustre_import.h>
64 #include <lustre_sec.h>
65
66 #include "gss_err.h"
67 #include "gss_internal.h"
68 #include "gss_api.h"
69 #include "gss_asn1.h"
70 #include "gss_krb5.h"
71 #include "gss_crypto.h"
72
/* serializes kc_seq_send increments when stamping token headers
 * (see fill_krb5_header()) */
static DEFINE_SPINLOCK(krb5_seq_lock);
74
/*
 * Per-enctype parameters: the linux crypto transform names used for
 * encryption and hashing, plus the sizes that depend on the enctype.
 * Slots are indexed by ENCTYPE_* values in the enctypes[] table below;
 * a slot with ke_hash_size == 0 is treated as unsupported.
 */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable name */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
84
/*
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but currently we simply use CBC with padding, because Linux
 * doesn't support CTS yet. This needs to be fixed in the future.
 */
/*
 * Supported enctype table, indexed by the krb5 ENCTYPE_* value.
 * For the aes*-cts-hmac-sha1-96 entries ke_hash_size is 12 because the
 * sha1 digest is truncated to 96 bits on the wire.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};
131
132 static const char * enctype2str(__u32 enctype)
133 {
134         if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
135                 return enctypes[enctype].ke_dispname;
136
137         return "unknown";
138 }
139
140 static
141 int krb5_init_keys(struct krb5_ctx *kctx)
142 {
143         struct krb5_enctype *ke;
144
145         if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
146             enctypes[kctx->kc_enctype].ke_hash_size == 0) {
147                 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
148                 return -1;
149         }
150
151         ke = &enctypes[kctx->kc_enctype];
152
153         /* tfm arc4 is stateful, user should alloc-use-free by his own */
154         if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
155             gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
156                 return -1;
157
158         /* tfm hmac is stateful, user should alloc-use-free by his own */
159         if (ke->ke_hash_hmac == 0 &&
160             gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
161                 return -1;
162         if (ke->ke_hash_hmac == 0 &&
163             gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
164                 return -1;
165
166         return 0;
167 }
168
169 static
170 void delete_context_kerberos(struct krb5_ctx *kctx)
171 {
172         rawobj_free(&kctx->kc_mech_used);
173
174         gss_keyblock_free(&kctx->kc_keye);
175         gss_keyblock_free(&kctx->kc_keyi);
176         gss_keyblock_free(&kctx->kc_keyc);
177 }
178
/*
 * Parse an rfc1964-style (version 0/1) context blob from user space into
 * @kctx.  @p..@end bounds the unread part of the blob; every field is
 * pulled through gss_get_bytes()/gss_get_rawobj()/gss_get_keyblock(),
 * which fail rather than read past @end.
 *
 * Returns 0 on success, GSS_S_FAILURE on a short/malformed blob or if
 * trailing bytes remain after parsing.
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: its enctype and keysize must match the enc key's */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: the blob carries no separate integrity key,
         * so reuse the seq/checksum key for kc_keyi */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* the whole blob must have been consumed */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
257
258 /* Flags for version 2 context flags */
259 #define KRB5_CTX_FLAG_INITIATOR         0x00000001
260 #define KRB5_CTX_FLAG_CFX               0x00000002
261 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
262
/*
 * Parse an rfc4121-style (version 2) context blob from user space into
 * @kctx.  Layout: endtime, flags, seq_send, enctype, per-key size,
 * key count (must be 3), then the ke/ki/kc keyblocks.
 *
 * Returns 0 on success, GSS_S_FAILURE on a short/malformed blob.
 * Note: unlike the rfc1964 path, trailing bytes are not rejected here.
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
325
/*
 * The whole purpose here is to keep the user-level gss context parsing
 * from nfs-utils as unchanged as we can; it is not quite mature yet, and
 * many things are still unclear, e.g. heimdal support.
 */
/*
 * Import a user-space-generated krb5 security context blob.  The leading
 * u32 selects the blob format: 0/1 are rfc1964-style (and double as the
 * initiate flag), 2 is rfc4121-style.  On success the parsed context is
 * stored in gctx->internal_ctx_id with its key tfms initialized.
 *
 * Returns GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise (the
 * partially-built context is torn down on failure).
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* versions 0/1 are rfc1964 style, 2 is rfc4121 style */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* for old-style blobs the version doubles as initiate flag */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
375
/*
 * Build a reverse-direction context from an existing one: the initiate
 * flag is flipped and the send/recv sequence numbers are swapped, while
 * the keys, mech oid and remaining state are duplicated as-is.
 *
 * Returns GSS_S_COMPLETE with the new context in gctx_new, or
 * GSS_S_FAILURE (everything allocated so far is freed).
 */
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        /* reverse context sends in the opposite direction */
        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        /* swap sequence numbers for the reversed direction */
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        /* set up the crypto tfms for the duplicated keys */
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
419
420 static
421 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
422                                    time64_t *endtime)
423 {
424         struct krb5_ctx *kctx = gctx->internal_ctx_id;
425
426         *endtime = kctx->kc_endtime;
427         return GSS_S_COMPLETE;
428 }
429
/* Destroy an imported context: free its keys and oid, then the struct. */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *ctx = internal_ctx;

        delete_context_kerberos(ctx);
        OBD_FREE_PTR(ctx);
}
438
/*
 * Compute a (keyed/keyless) checksum over the krb5 wire token header
 * followed by the plain text in @msgs and @iovs, placing the digest in
 * freshly allocated @cksum (caller frees via rawobj_free()).
 *
 * For hmac enctypes (ke_hash_hmac) the digest itself is the checksum.
 * For the keyless md5 case the digest is additionally encrypted in
 * place with kb->kb_tfm, per the old rfc1964 checksum scheme.
 *
 * Returns GSS_S_COMPLETE or GSS_S_FAILURE.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct ahash_request *req = NULL;
        enum cfs_crypto_hash_alg hash_algo;
        rawobj_t hdr;
        int rc;

        hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);

        /* For the cbc(des) case we want md5 instead of hmac(md5) */
        if (strcmp(ke->ke_enc_name, "cbc(des)"))
                req = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
                                           kb->kb_key.len);
        else
                req = cfs_crypto_hash_init(hash_algo, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("failed to alloc hash %s : rc = %d\n",
                       ke->ke_hash_name, rc);
                goto out_no_hash;
        }

        cksum->len = cfs_crypto_hash_digestsize(hash_algo);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                rc = -ENOMEM;
                goto out_free_hash;
        }

        /* token header is hashed first, then the payload */
        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        rc = gss_digest_hash(req, &hdr, msgcnt, msgs,
                             iovcnt, iovs, cksum);
        if (rc)
                goto out_free_hash;

        if (!ke->ke_hash_hmac) {
                LASSERT(kb->kb_tfm);

                /* finalize here, then skip the out_free_hash finalize */
                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
                rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
                                       cksum->data, cksum->data,
                                       cksum->len);
                goto out_no_hash;
        }

out_free_hash:
        if (req)
                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
out_no_hash:
        return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
504
505 static void fill_krb5_header(struct krb5_ctx *kctx,
506                              struct krb5_header *khdr,
507                              int privacy)
508 {
509         unsigned char acceptor_flag;
510
511         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
512
513         if (privacy) {
514                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
515                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
516                 khdr->kh_ec = cpu_to_be16(0);
517                 khdr->kh_rrc = cpu_to_be16(0);
518         } else {
519                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
520                 khdr->kh_flags = acceptor_flag;
521                 khdr->kh_ec = cpu_to_be16(0xffff);
522                 khdr->kh_rrc = cpu_to_be16(0xffff);
523         }
524
525         khdr->kh_filler = 0xff;
526         spin_lock(&krb5_seq_lock);
527         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
528         spin_unlock(&krb5_seq_lock);
529 }
530
/*
 * Sanity-check a received token header against what this context
 * expects: token id (wrap vs. mic per @privacy), sender-direction flag,
 * confidential flag for wrap tokens, the 0xff filler, and the EC/RRC
 * values as emitted by fill_krb5_header() (0 for wrap, 0xffff for mic).
 * The sequence number is NOT checked here.
 *
 * Returns GSS_S_COMPLETE, GSS_S_DEFECTIVE_TOKEN or GSS_S_BAD_SIG.
 */
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
                                struct krb5_header *khdr,
                                int privacy)
{
        unsigned char acceptor_flag;
        __u16         tok_id, ec_rrc;

        /* the peer's direction flag is the opposite of ours */
        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;

        if (privacy) {
                tok_id = KG_TOK_WRAP_MSG;
                ec_rrc = 0x0;
        } else {
                tok_id = KG_TOK_MIC_MSG;
                ec_rrc = 0xffff;
        }

        /* sanity checks */
        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
                CERROR("bad token id\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
                CERROR("bad direction flag\n");
                return GSS_S_BAD_SIG;
        }
        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
                CERROR("missing confidential flag\n");
                return GSS_S_BAD_SIG;
        }
        if (khdr->kh_filler != 0xff) {
                CERROR("bad filler\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
                CERROR("bad EC or RRC\n");
                return GSS_S_DEFECTIVE_TOKEN;
        }
        return GSS_S_COMPLETE;
}
572
/*
 * Produce a mic token over @msgs/@iovs into @token: a krb5 header
 * followed by the trailing ke_hash_size bytes of the checksum computed
 * with the kc_keyc key.  The caller must supply a @token buffer of at
 * least sizeof(header) + ke_hash_size bytes; token->len is set to the
 * actual token size on success.
 *
 * Returns GSS_S_COMPLETE or GSS_S_FAILURE.
 */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum over header + payload, keyed with kc_keyc */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
                               msgcnt, msgs, iovcnt, iovs, &cksum))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* only the last ke_hash_size bytes of the digest go on the wire */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
608
/*
 * Verify a mic token produced by gss_get_mic_kerberos(): check the
 * header, recompute the checksum over header + payload with kc_keyc,
 * and compare its trailing ke_hash_size bytes with those in @token.
 *
 * Returns GSS_S_COMPLETE, GSS_S_DEFECTIVE_TOKEN, GSS_S_BAD_SIG or
 * GSS_S_FAILURE.
 */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                goto out;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                GOTO(out, major = GSS_S_FAILURE);
        }

        /* recompute the checksum the sender should have produced */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        }

        /* compare the trailing ke_hash_size bytes, as the sender sent */
        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
        }
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
out:
        return major;
}
660
/*
 * Encrypt a bulk descriptor's pages for privacy mode.  Three pieces are
 * encrypted with the same tfm, chaining the IV in local_iv across all
 * calls: the confounder (into cipher->data), each bulk page (into the
 * matching encrypted kiov), and finally the krb5 header (into
 * cipher->data + blocksize).  Page lengths are rounded up to the cipher
 * blocksize before encryption.
 *
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 *
 * Returns 0 on success or a negative errno from the crypto layer.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        /* cipher holds exactly the encrypted confounder + header */
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                /* source length rounded up to a whole cipher block */
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                /* publish the actual encrypted extent of this page */
                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
765
766 /*
767  * desc->bd_nob_transferred is the size of cipher text received.
768  * desc->bd_nob is the target size of plain text supposed to be.
769  *
770  * if adj_nob != 0, we adjust each page's kiov_len to the actual
771  * plain text size.
772  * - for client read: we don't know data size for each page, so
773  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
774  *   be smaller, so we need to adjust it according to
775  *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
776  *   this means we DO NOT support the situation that server send an odd size
777  *   data in a page which is not the last one.
778  * - for server write: we knows exactly data size for each page being expected,
779  *   thus kiov_len is accurate already, so we should not adjust it at all.
780  *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
781  *   round_up(bd_iov[]->kiov_len) which
782  *   should have been done by prep_bulk().
783  */
784 static
785 int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
786                       struct krb5_header *khdr,
787                       struct ptlrpc_bulk_desc *desc,
788                       rawobj_t *cipher,
789                       rawobj_t *plain,
790                       int adj_nob)
791 {
792         struct blkcipher_desc   ciph_desc;
793         __u8                    local_iv[16] = {0};
794         struct scatterlist      src, dst;
795         struct sg_table         sg_src, sg_dst;
796         int                     ct_nob = 0, pt_nob = 0;
797         int                     blocksize, i, rc;
798
799         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
800         LASSERT(desc->bd_iov_count);
801         LASSERT(GET_ENC_KIOV(desc));
802         LASSERT(desc->bd_nob_transferred);
803
804         blocksize = crypto_blkcipher_blocksize(tfm);
805         LASSERT(blocksize > 1);
806         LASSERT(cipher->len == blocksize + sizeof(*khdr));
807
808         ciph_desc.tfm  = tfm;
809         ciph_desc.info = local_iv;
810         ciph_desc.flags = 0;
811
812         if (desc->bd_nob_transferred % blocksize) {
813                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
814                 return -EPROTO;
815         }
816
817         /* decrypt head (confounder) */
818         rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
819         if (rc != 0)
820                 return rc;
821
822         rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
823         if (rc != 0) {
824                 gss_teardown_sgtable(&sg_src);
825                 return rc;
826         }
827
828         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
829                                          sg_src.sgl, blocksize);
830
831         gss_teardown_sgtable(&sg_dst);
832         gss_teardown_sgtable(&sg_src);
833
834         if (rc) {
835                 CERROR("error to decrypt confounder: %d\n", rc);
836                 return rc;
837         }
838
839         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
840              i++) {
841                 if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
842                     != 0 ||
843                     BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
844                     != 0) {
845                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
846                                i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
847                                BD_GET_ENC_KIOV(desc, i).kiov_len,
848                                blocksize);
849                         return -EFAULT;
850                 }
851
852                 if (adj_nob) {
853                         if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
854                             desc->bd_nob_transferred)
855                                 BD_GET_ENC_KIOV(desc, i).kiov_len =
856                                         desc->bd_nob_transferred - ct_nob;
857
858                         BD_GET_KIOV(desc, i).kiov_len =
859                           BD_GET_ENC_KIOV(desc, i).kiov_len;
860                         if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
861                             desc->bd_nob)
862                                 BD_GET_KIOV(desc, i).kiov_len =
863                                   desc->bd_nob - pt_nob;
864                 } else {
865                         /* this should be guaranteed by LNET */
866                         LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
867                                 kiov_len <=
868                                 desc->bd_nob_transferred);
869                         LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
870                                 BD_GET_ENC_KIOV(desc, i).kiov_len);
871                 }
872
873                 if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
874                         continue;
875
876                 sg_init_table(&src, 1);
877                 sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
878                             BD_GET_ENC_KIOV(desc, i).kiov_len,
879                             BD_GET_ENC_KIOV(desc, i).kiov_offset);
880                 dst = src;
881                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
882                         sg_assign_page(&dst,
883                                        BD_GET_KIOV(desc, i).kiov_page);
884
885                 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
886                                                  src.length);
887                 if (rc) {
888                         CERROR("error to decrypt page: %d\n", rc);
889                         return rc;
890                 }
891
892                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
893                         memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
894                                BD_GET_KIOV(desc, i).kiov_offset,
895                                page_address(BD_GET_ENC_KIOV(desc, i).
896                                             kiov_page) +
897                                BD_GET_KIOV(desc, i).kiov_offset,
898                                BD_GET_KIOV(desc, i).kiov_len);
899                 }
900
901                 ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
902                 pt_nob += BD_GET_KIOV(desc, i).kiov_len;
903         }
904
905         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
906                 CERROR("%d cipher text transferred but only %d decrypted\n",
907                        desc->bd_nob_transferred, ct_nob);
908                 return -EFAULT;
909         }
910
911         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
912                 CERROR("%d plain text expected but only %d received\n",
913                        desc->bd_nob, pt_nob);
914                 return -EFAULT;
915         }
916
917         /* if needed, clear up the rest unused iovs */
918         if (adj_nob)
919                 while (i < desc->bd_iov_count)
920                         BD_GET_KIOV(desc, i++).kiov_len = 0;
921
922         /* decrypt tail (krb5 header) */
923         rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
924                                sizeof(*khdr));
925         if (rc != 0)
926                 return rc;
927
928         rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
929                                sizeof(*khdr));
930         if (rc != 0) {
931                 gss_teardown_sgtable(&sg_src);
932                 return rc;
933         }
934
935         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
936                                          sizeof(*khdr));
937
938         gss_teardown_sgtable(&sg_src);
939         gss_teardown_sgtable(&sg_dst);
940
941         if (rc) {
942                 CERROR("error to decrypt tail: %d\n", rc);
943                 return rc;
944         }
945
946         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
947                 CERROR("krb5 header doesn't match\n");
948                 return -EACCES;
949         }
950
951         return 0;
952 }
953
/*
 * Wrap (encrypt and integrity-protect) a message into a krb5 privacy
 * token.
 *
 * Final token layout written into \a token:
 *   | krb5 header | cipher text | checksum (ke_hash_size bytes) |
 *
 * \param gctx       GSS context holding the kerberos context
 * \param gsshdr     GSS header; covered by the checksum but not encrypted
 * \param msg        plain text; padded in place up to the cipher blocksize
 *                   before encryption
 * \param msg_buflen total buffer space behind msg->data, available for the
 *                   blocksize padding
 * \param token      output buffer; on success token->len is set to the
 *                   final token size
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE on any error
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        u32 major;
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac: derive a per-message arc4 encryption key by
                 * checksumming the message checksum with kc_keyi, then
                 * encrypt with a transient ecb(arc4) tfm */
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1095
1096 static
1097 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1098                              struct ptlrpc_bulk_desc *desc)
1099 {
1100         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1101         int                  blocksize, i;
1102
1103         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1104         LASSERT(desc->bd_iov_count);
1105         LASSERT(GET_ENC_KIOV(desc));
1106         LASSERT(kctx->kc_keye.kb_tfm);
1107
1108         blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1109
1110         for (i = 0; i < desc->bd_iov_count; i++) {
1111                 LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
1112                 /*
1113                  * offset should always start at page boundary of either
1114                  * client or server side.
1115                  */
1116                 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
1117                         CERROR("odd offset %d in page %d\n",
1118                                BD_GET_KIOV(desc, i).kiov_offset, i);
1119                         return GSS_S_FAILURE;
1120                 }
1121
1122                 BD_GET_ENC_KIOV(desc, i).kiov_offset =
1123                         BD_GET_KIOV(desc, i).kiov_offset;
1124                 BD_GET_ENC_KIOV(desc, i).kiov_len =
1125                         (BD_GET_KIOV(desc, i).kiov_len +
1126                          blocksize - 1) & (~(blocksize - 1));
1127         }
1128
1129         return GSS_S_COMPLETE;
1130 }
1131
/*
 * Wrap a bulk transfer: encrypt the bulk pages in place (via
 * krb5_encrypt_bulk) and build the small head/tail token that carries
 * the krb5 header, the encrypted confounder + krb5 header copy, and the
 * checksum over confounder + clear pages + krb5 header.
 *
 * \param gctx     GSS context holding the kerberos context
 * \param desc     bulk descriptor whose pages are encrypted in place
 * \param token    output buffer; on success token->len is set to the
 *                 final token size
 * \param adj_nob  passed through to krb5_encrypt_bulk to allow nob
 *                 adjustment
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE on any error
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int rc = 0;
        u32 major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         *
         * NOTE(review): the literal 16 below stands for the hash size;
         * presumably ke->ke_hash_size == 16 for every bulk-capable
         * enctype — confirm against the enctypes[] table.
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac is never used for bulk */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1243
1244 static
1245 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1246                           rawobj_t        *gsshdr,
1247                           rawobj_t        *token,
1248                           rawobj_t        *msg)
1249 {
1250         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1251         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1252         struct krb5_header  *khdr;
1253         unsigned char       *tmpbuf;
1254         int                  blocksize, bodysize;
1255         rawobj_t             cksum = RAWOBJ_EMPTY;
1256         rawobj_t             cipher_in, plain_out;
1257         rawobj_t             hash_objs[3];
1258         int                  rc = 0;
1259         __u32                major;
1260         __u8                 local_iv[16] = {0};
1261
1262         LASSERT(ke);
1263
1264         if (token->len < sizeof(*khdr)) {
1265                 CERROR("short signature: %u\n", token->len);
1266                 return GSS_S_DEFECTIVE_TOKEN;
1267         }
1268
1269         khdr = (struct krb5_header *)token->data;
1270
1271         major = verify_krb5_header(kctx, khdr, 1);
1272         if (major != GSS_S_COMPLETE) {
1273                 CERROR("bad krb5 header\n");
1274                 return major;
1275         }
1276
1277         /* block size */
1278         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1279                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1280                 blocksize = 1;
1281         } else {
1282                 LASSERT(kctx->kc_keye.kb_tfm);
1283                 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1284         }
1285
1286         /* expected token layout:
1287          * ----------------------------------------
1288          * | krb5 header | cipher text | checksum |
1289          * ----------------------------------------
1290          */
1291         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1292
1293         if (bodysize % blocksize) {
1294                 CERROR("odd bodysize %d\n", bodysize);
1295                 return GSS_S_DEFECTIVE_TOKEN;
1296         }
1297
1298         if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1299                 CERROR("incomplete token: bodysize %d\n", bodysize);
1300                 return GSS_S_DEFECTIVE_TOKEN;
1301         }
1302
1303         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1304                 CERROR("buffer too small: %u, require %d\n",
1305                        msg->len, bodysize - ke->ke_conf_size);
1306                 return GSS_S_FAILURE;
1307         }
1308
1309         /* decrypting */
1310         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1311         if (!tmpbuf)
1312                 return GSS_S_FAILURE;
1313
1314         major = GSS_S_FAILURE;
1315
1316         cipher_in.data = (__u8 *)(khdr + 1);
1317         cipher_in.len = bodysize;
1318         plain_out.data = tmpbuf;
1319         plain_out.len = bodysize;
1320
1321         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1322                 rawobj_t                 arc4_keye;
1323                 struct crypto_blkcipher *arc4_tfm;
1324
1325                 cksum.data = token->data + token->len - ke->ke_hash_size;
1326                 cksum.len = ke->ke_hash_size;
1327
1328                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1329                                        NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1330                         CERROR("failed to obtain arc4 enc key\n");
1331                         GOTO(arc4_out, rc = -EACCES);
1332                 }
1333
1334                 arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1335                 if (IS_ERR(arc4_tfm)) {
1336                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1337                         GOTO(arc4_out_key, rc = -EACCES);
1338                 }
1339
1340                 if (crypto_blkcipher_setkey(arc4_tfm,
1341                                          arc4_keye.data, arc4_keye.len)) {
1342                         CERROR("failed to set arc4 key, len %d\n",
1343                                arc4_keye.len);
1344                         GOTO(arc4_out_tfm, rc = -EACCES);
1345                 }
1346
1347                 rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
1348                                        &plain_out, 0);
1349 arc4_out_tfm:
1350                 crypto_free_blkcipher(arc4_tfm);
1351 arc4_out_key:
1352                 rawobj_free(&arc4_keye);
1353 arc4_out:
1354                 cksum = RAWOBJ_EMPTY;
1355         } else {
1356                 rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
1357                                        &cipher_in, &plain_out, 0);
1358         }
1359
1360         if (rc != 0) {
1361                 CERROR("error decrypt\n");
1362                 goto out_free;
1363         }
1364         LASSERT(plain_out.len == bodysize);
1365
1366         /* expected clear text layout:
1367          * -----------------------------------------
1368          * | confounder | clear msgs | krb5 header |
1369          * -----------------------------------------
1370          */
1371
1372         /* verify krb5 header in token is not modified */
1373         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1374                    sizeof(*khdr))) {
1375                 CERROR("decrypted krb5 header mismatch\n");
1376                 goto out_free;
1377         }
1378
1379         /* verify checksum, compose clear text as layout:
1380          * ------------------------------------------------------
1381          * | confounder | gss header | clear msgs | krb5 header |
1382          * ------------------------------------------------------
1383          */
1384         hash_objs[0].len = ke->ke_conf_size;
1385         hash_objs[0].data = plain_out.data;
1386         hash_objs[1].len = gsshdr->len;
1387         hash_objs[1].data = gsshdr->data;
1388         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1389         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1390         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1391                                khdr, 3, hash_objs, 0, NULL, &cksum))
1392                 goto out_free;
1393
1394         LASSERT(cksum.len >= ke->ke_hash_size);
1395         if (memcmp((char *)(khdr + 1) + bodysize,
1396                    cksum.data + cksum.len - ke->ke_hash_size,
1397                    ke->ke_hash_size)) {
1398                 CERROR("checksum mismatch\n");
1399                 goto out_free;
1400         }
1401
1402         msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
1403         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1404
1405         major = GSS_S_COMPLETE;
1406 out_free:
1407         OBD_FREE_LARGE(tmpbuf, bodysize);
1408         rawobj_free(&cksum);
1409         return major;
1410 }
1411
/*
 * Unwrap a bulk transfer: decrypt the bulk pages in place (via
 * krb5_decrypt_bulk) and verify the checksum carried in the head/tail
 * token produced by gss_wrap_bulk_kerberos().
 *
 * \param gctx     GSS context holding the kerberos context
 * \param desc     bulk descriptor whose pages are decrypted in place
 * \param token    head/tail token: | krb5 header | cipher | cksum |
 * \param adj_nob  passed through to krb5_decrypt_bulk to allow nob
 *                 adjustment
 *
 * \retval GSS_S_COMPLETE on success, otherwise a GSS error code
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size; arcfour-hmac is never used for bulk */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* decrypt the head/tail cipher in place inside the token */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *
         * NOTE(review): only blocksize bytes of confounder are covered
         * here while the wrap side checksums ke_conf_size bytes; the two
         * agree only when ke_conf_size == blocksize — presumably true for
         * all bulk-capable enctypes, confirm against the enctypes[] table.
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1504
1505 int gss_display_kerberos(struct gss_ctx        *ctx,
1506                          char                  *buf,
1507                          int                    bufsize)
1508 {
1509         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1510         int                 written;
1511
1512         written = snprintf(buf, bufsize, "krb5 (%s)",
1513                            enctype2str(kctx->kc_enctype));
1514         return written;
1515 }
1516
/*
 * Method table wiring the kerberos implementations into the generic
 * GSS API layer.
 */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1531
/*
 * sptlrpc sub-flavors offered by the krb5 mechanism, mapping each
 * sub-flavor name to its sptlrpc service level (NULL/AUTH/INTG/PRIV).
 */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1558
1559 static struct gss_api_mech gss_kerberos_mech = {
1560         /* .gm_owner uses default NULL value for THIS_MODULE */
1561         .gm_name        = "krb5",
1562         .gm_oid         = (rawobj_t)
1563                                 {9, "\052\206\110\206\367\022\001\002\002"},
1564         .gm_ops         = &gss_kerberos_ops,
1565         .gm_sf_num      = 4,
1566         .gm_sfs         = gss_kerberos_sfs,
1567 };
1568
1569 int __init init_kerberos_module(void)
1570 {
1571         int status;
1572
1573         status = lgss_mech_register(&gss_kerberos_mech);
1574         if (status)
1575                 CERROR("Failed to register kerberos gss mechanism!\n");
1576         return status;
1577 }
1578
/* Unregister the krb5 mechanism on module unload. */
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}