Whamcloud - gitweb
LU-9019 sec: migrate to 64 bit time
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, 2015, Intel Corporation.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"
#include "gss_krb5.h"
#include "gss_crypto.h"
71
/* Serializes kc_seq_send increments in fill_krb5_header(). Use
 * DEFINE_SPINLOCK so the lock is statically initialized; the previous bare
 * "static spinlock_t" declaration relied on implicit zero-initialization,
 * which is not a valid initializer on all configs (e.g. lockdep/debug
 * spinlocks). */
static DEFINE_SPINLOCK(krb5_seq_lock);
73
/* Per-enctype parameters: linux crypto tfm names plus the sizes this
 * mechanism uses when building/parsing krb5 tokens. Indexed by the krb5
 * ENCTYPE_* value in the enctypes[] table below. */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable name */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
83
/*
 * NOTE: for aes128-cts and aes256-cts, MIT implementation use CTS encryption.
 * but currently we simply CBC with padding, because linux doesn't support CTS
 * yet. this need to be fixed in the future.
 *
 * Sparse table indexed by krb5 enctype number; entries with a zero
 * ke_hash_size (i.e. the gaps) are treated as unsupported by
 * krb5_init_keys().
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "hmac(sha1)",
                .ke_hash_size   = 12,
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "hmac(md5)",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};
130
/* Number of entries in enctypes[]. Parenthesize the expansion so the macro
 * is safe inside larger expressions (the old unparenthesized form invited
 * precedence surprises), and size by element rather than by type name. */
#define MAX_ENCTYPES    (sizeof(enctypes) / sizeof(enctypes[0]))
132
133 static const char * enctype2str(__u32 enctype)
134 {
135         if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
136                 return enctypes[enctype].ke_dispname;
137
138         return "unknown";
139 }
140
141 static
142 int krb5_init_keys(struct krb5_ctx *kctx)
143 {
144         struct krb5_enctype *ke;
145
146         if (kctx->kc_enctype >= MAX_ENCTYPES ||
147             enctypes[kctx->kc_enctype].ke_hash_size == 0) {
148                 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
149                 return -1;
150         }
151
152         ke = &enctypes[kctx->kc_enctype];
153
154         /* tfm arc4 is stateful, user should alloc-use-free by his own */
155         if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
156             gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
157                 return -1;
158
159         /* tfm hmac is stateful, user should alloc-use-free by his own */
160         if (ke->ke_hash_hmac == 0 &&
161             gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
162                 return -1;
163         if (ke->ke_hash_hmac == 0 &&
164             gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
165                 return -1;
166
167         return 0;
168 }
169
170 static
171 void delete_context_kerberos(struct krb5_ctx *kctx)
172 {
173         rawobj_free(&kctx->kc_mech_used);
174
175         gss_keyblock_free(&kctx->kc_keye);
176         gss_keyblock_free(&kctx->kc_keyi);
177         gss_keyblock_free(&kctx->kc_keyc);
178 }
179
/*
 * Parse a version 0/1 (rfc1964-style) context blob, as produced by the
 * user-space gss daemon, into @kctx. @p/@end delimit the serialized data;
 * the blob must be consumed exactly (p == end on success).
 *
 * Returns 0 on success, GSS_S_FAILURE on truncated/malformed input.
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: enctype and keysize must match the enc key's */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: integrity key is a copy of the seq key */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* reject trailing garbage */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
258
/* Flags for version 2 context flags, carried in the rfc4121-style
 * on-wire context blob parsed by import_context_rfc4121() */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
263
/*
 * Parse a version 2 (rfc4121-style) context blob into @kctx. Unlike the
 * rfc1964 layout, all three keys (ke/ki/kc) are carried explicitly and
 * share a single enctype and keysize.
 *
 * Returns 0 on success, GSS_S_FAILURE on truncated/malformed input.
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
326
/*
 * The whole purpose here is trying to keep user level gss context parsing
 * from nfs-utils unchanged as possible as we can, they are not quite mature
 * yet, and many stuff still not clear, like heimdal etc.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        /* first word of the blob is the on-wire format version */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* versions 0 and 1 use the rfc1964 layout, version 2 the
         * rfc4121 layout; anything newer is unsupported */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                /* free whatever was parsed before the failure */
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
376
377 static
378 __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
379                                         struct gss_ctx *gctx_new)
380 {
381         struct krb5_ctx *kctx = gctx->internal_ctx_id;
382         struct krb5_ctx *knew;
383
384         OBD_ALLOC_PTR(knew);
385         if (!knew)
386                 return GSS_S_FAILURE;
387
388         knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
389         knew->kc_cfx = kctx->kc_cfx;
390         knew->kc_seed_init = kctx->kc_seed_init;
391         knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
392         knew->kc_endtime = kctx->kc_endtime;
393
394         memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
395         knew->kc_seq_send = kctx->kc_seq_recv;
396         knew->kc_seq_recv = kctx->kc_seq_send;
397         knew->kc_enctype = kctx->kc_enctype;
398
399         if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
400                 goto out_err;
401
402         if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
403                 goto out_err;
404         if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
405                 goto out_err;
406         if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
407                 goto out_err;
408         if (krb5_init_keys(knew))
409                 goto out_err;
410
411         gctx_new->internal_ctx_id = knew;
412         CDEBUG(D_SEC, "successfully copied reverse context\n");
413         return GSS_S_COMPLETE;
414
415 out_err:
416         delete_context_kerberos(knew);
417         OBD_FREE_PTR(knew);
418         return GSS_S_FAILURE;
419 }
420
421 static
422 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
423                                    time64_t *endtime)
424 {
425         struct krb5_ctx *kctx = gctx->internal_ctx_id;
426
427         *endtime = kctx->kc_endtime;
428         return GSS_S_COMPLETE;
429 }
430
/* Destroy a kerberos context: free its owned resources, then the
 * context structure itself. */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);

        OBD_FREE_PTR(kctx);
}
439
440 /*
441  * compute (keyed/keyless) checksum against the plain text which appended
442  * with krb5 wire token header.
443  */
444 static
445 __s32 krb5_make_checksum(__u32 enctype,
446                          struct gss_keyblock *kb,
447                          struct krb5_header *khdr,
448                          int msgcnt, rawobj_t *msgs,
449                          int iovcnt, lnet_kiov_t *iovs,
450                          rawobj_t *cksum)
451 {
452         struct krb5_enctype   *ke = &enctypes[enctype];
453         struct crypto_hash    *tfm;
454         rawobj_t               hdr;
455         __u32                  code = GSS_S_FAILURE;
456         int                    rc;
457
458         if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
459                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
460                 return GSS_S_FAILURE;
461         }
462
463         cksum->len = crypto_hash_digestsize(tfm);
464         OBD_ALLOC_LARGE(cksum->data, cksum->len);
465         if (!cksum->data) {
466                 cksum->len = 0;
467                 goto out_tfm;
468         }
469
470         hdr.data = (__u8 *)khdr;
471         hdr.len = sizeof(*khdr);
472
473         if (ke->ke_hash_hmac)
474                 rc = gss_digest_hmac(tfm, &kb->kb_key,
475                                      &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
476         else
477                 rc = gss_digest_norm(tfm, kb,
478                                      &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
479
480         if (rc == 0)
481                 code = GSS_S_COMPLETE;
482 out_tfm:
483         crypto_free_hash(tfm);
484         return code;
485 }
486
487 static void fill_krb5_header(struct krb5_ctx *kctx,
488                              struct krb5_header *khdr,
489                              int privacy)
490 {
491         unsigned char acceptor_flag;
492
493         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
494
495         if (privacy) {
496                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
497                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
498                 khdr->kh_ec = cpu_to_be16(0);
499                 khdr->kh_rrc = cpu_to_be16(0);
500         } else {
501                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
502                 khdr->kh_flags = acceptor_flag;
503                 khdr->kh_ec = cpu_to_be16(0xffff);
504                 khdr->kh_rrc = cpu_to_be16(0xffff);
505         }
506
507         khdr->kh_filler = 0xff;
508         spin_lock(&krb5_seq_lock);
509         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
510         spin_unlock(&krb5_seq_lock);
511 }
512
513 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
514                                 struct krb5_header *khdr,
515                                 int privacy)
516 {
517         unsigned char acceptor_flag;
518         __u16         tok_id, ec_rrc;
519
520         acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
521
522         if (privacy) {
523                 tok_id = KG_TOK_WRAP_MSG;
524                 ec_rrc = 0x0;
525         } else {
526                 tok_id = KG_TOK_MIC_MSG;
527                 ec_rrc = 0xffff;
528         }
529
530         /* sanity checks */
531         if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
532                 CERROR("bad token id\n");
533                 return GSS_S_DEFECTIVE_TOKEN;
534         }
535         if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
536                 CERROR("bad direction flag\n");
537                 return GSS_S_BAD_SIG;
538         }
539         if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
540                 CERROR("missing confidential flag\n");
541                 return GSS_S_BAD_SIG;
542         }
543         if (khdr->kh_filler != 0xff) {
544                 CERROR("bad filler\n");
545                 return GSS_S_DEFECTIVE_TOKEN;
546         }
547         if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
548             be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
549                 CERROR("bad EC or RRC\n");
550                 return GSS_S_DEFECTIVE_TOKEN;
551         }
552         return GSS_S_COMPLETE;
553 }
554
555 static
556 __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
557                            int msgcnt,
558                            rawobj_t *msgs,
559                            int iovcnt,
560                            lnet_kiov_t *iovs,
561                            rawobj_t *token)
562 {
563         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
564         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
565         struct krb5_header  *khdr;
566         rawobj_t             cksum = RAWOBJ_EMPTY;
567
568         /* fill krb5 header */
569         LASSERT(token->len >= sizeof(*khdr));
570         khdr = (struct krb5_header *)token->data;
571         fill_krb5_header(kctx, khdr, 0);
572
573         /* checksum */
574         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
575                                khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
576                 return GSS_S_FAILURE;
577
578         LASSERT(cksum.len >= ke->ke_hash_size);
579         LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
580         memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
581                ke->ke_hash_size);
582
583         token->len = sizeof(*khdr) + ke->ke_hash_size;
584         rawobj_free(&cksum);
585         return GSS_S_COMPLETE;
586 }
587
588 static
589 __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
590                               int msgcnt,
591                               rawobj_t *msgs,
592                               int iovcnt,
593                               lnet_kiov_t *iovs,
594                               rawobj_t *token)
595 {
596         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
597         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
598         struct krb5_header  *khdr;
599         rawobj_t             cksum = RAWOBJ_EMPTY;
600         __u32                major;
601
602         if (token->len < sizeof(*khdr)) {
603                 CERROR("short signature: %u\n", token->len);
604                 return GSS_S_DEFECTIVE_TOKEN;
605         }
606
607         khdr = (struct krb5_header *)token->data;
608
609         major = verify_krb5_header(kctx, khdr, 0);
610         if (major != GSS_S_COMPLETE) {
611                 CERROR("bad krb5 header\n");
612                 return major;
613         }
614
615         if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
616                 CERROR("short signature: %u, require %d\n",
617                        token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
618                 return GSS_S_FAILURE;
619         }
620
621         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
622                                khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
623                 CERROR("failed to make checksum\n");
624                 return GSS_S_FAILURE;
625         }
626
627         LASSERT(cksum.len >= ke->ke_hash_size);
628         if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
629                    ke->ke_hash_size)) {
630                 CERROR("checksum mismatch\n");
631                 rawobj_free(&cksum);
632                 return GSS_S_BAD_SIG;
633         }
634
635         rawobj_free(&cksum);
636         return GSS_S_COMPLETE;
637 }
638
/*
 * Encrypt a bulk descriptor for the privacy service: CBC-encrypts the
 * confounder, then each data page (into the enc kiov pages), then the krb5
 * header. local_iv is updated by each encrypt call, so the regions are
 * chained together. cipher receives confounder + encrypted header and must
 * be exactly blocksize + sizeof(*khdr) bytes.
 *
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size
 * (sum of the block-aligned page lengths).
 *
 * Returns 0 on success, negative errno on failure.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages: each page length is rounded up to a
         * multiple of blocksize, and the enc kiov entry is updated to
         * describe the resulting cipher text */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
743
744 /*
745  * desc->bd_nob_transferred is the size of cipher text received.
746  * desc->bd_nob is the target size of plain text supposed to be.
747  *
748  * if adj_nob != 0, we adjust each page's kiov_len to the actual
749  * plain text size.
750  * - for client read: we don't know data size for each page, so
751  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
752  *   be smaller, so we need to adjust it according to
753  *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
754  *   this means we DO NOT support the situation that server send an odd size
755  *   data in a page which is not the last one.
756  * - for server write: we knows exactly data size for each page being expected,
757  *   thus kiov_len is accurate already, so we should not adjust it at all.
758  *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
759  *   round_up(bd_iov[]->kiov_len) which
760  *   should have been done by prep_bulk().
761  */
762 static
763 int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
764                       struct krb5_header *khdr,
765                       struct ptlrpc_bulk_desc *desc,
766                       rawobj_t *cipher,
767                       rawobj_t *plain,
768                       int adj_nob)
769 {
770         struct blkcipher_desc   ciph_desc;
771         __u8                    local_iv[16] = {0};
772         struct scatterlist      src, dst;
773         struct sg_table         sg_src, sg_dst;
774         int                     ct_nob = 0, pt_nob = 0;
775         int                     blocksize, i, rc;
776
777         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
778         LASSERT(desc->bd_iov_count);
779         LASSERT(GET_ENC_KIOV(desc));
780         LASSERT(desc->bd_nob_transferred);
781
782         blocksize = crypto_blkcipher_blocksize(tfm);
783         LASSERT(blocksize > 1);
784         LASSERT(cipher->len == blocksize + sizeof(*khdr));
785
786         ciph_desc.tfm  = tfm;
787         ciph_desc.info = local_iv;
788         ciph_desc.flags = 0;
789
790         if (desc->bd_nob_transferred % blocksize) {
791                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
792                 return -EPROTO;
793         }
794
795         /* decrypt head (confounder) */
796         rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
797         if (rc != 0)
798                 return rc;
799
800         rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
801         if (rc != 0) {
802                 gss_teardown_sgtable(&sg_src);
803                 return rc;
804         }
805
806         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
807                                          sg_src.sgl, blocksize);
808
809         gss_teardown_sgtable(&sg_dst);
810         gss_teardown_sgtable(&sg_src);
811
812         if (rc) {
813                 CERROR("error to decrypt confounder: %d\n", rc);
814                 return rc;
815         }
816
817         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
818              i++) {
819                 if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
820                     != 0 ||
821                     BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
822                     != 0) {
823                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
824                                i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
825                                BD_GET_ENC_KIOV(desc, i).kiov_len,
826                                blocksize);
827                         return -EFAULT;
828                 }
829
830                 if (adj_nob) {
831                         if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
832                             desc->bd_nob_transferred)
833                                 BD_GET_ENC_KIOV(desc, i).kiov_len =
834                                         desc->bd_nob_transferred - ct_nob;
835
836                         BD_GET_KIOV(desc, i).kiov_len =
837                           BD_GET_ENC_KIOV(desc, i).kiov_len;
838                         if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
839                             desc->bd_nob)
840                                 BD_GET_KIOV(desc, i).kiov_len =
841                                   desc->bd_nob - pt_nob;
842                 } else {
843                         /* this should be guaranteed by LNET */
844                         LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
845                                 kiov_len <=
846                                 desc->bd_nob_transferred);
847                         LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
848                                 BD_GET_ENC_KIOV(desc, i).kiov_len);
849                 }
850
851                 if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
852                         continue;
853
854                 sg_init_table(&src, 1);
855                 sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
856                             BD_GET_ENC_KIOV(desc, i).kiov_len,
857                             BD_GET_ENC_KIOV(desc, i).kiov_offset);
858                 dst = src;
859                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
860                         sg_assign_page(&dst,
861                                        BD_GET_KIOV(desc, i).kiov_page);
862
863                 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
864                                                  src.length);
865                 if (rc) {
866                         CERROR("error to decrypt page: %d\n", rc);
867                         return rc;
868                 }
869
870                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
871                         memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
872                                BD_GET_KIOV(desc, i).kiov_offset,
873                                page_address(BD_GET_ENC_KIOV(desc, i).
874                                             kiov_page) +
875                                BD_GET_KIOV(desc, i).kiov_offset,
876                                BD_GET_KIOV(desc, i).kiov_len);
877                 }
878
879                 ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
880                 pt_nob += BD_GET_KIOV(desc, i).kiov_len;
881         }
882
883         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
884                 CERROR("%d cipher text transferred but only %d decrypted\n",
885                        desc->bd_nob_transferred, ct_nob);
886                 return -EFAULT;
887         }
888
889         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
890                 CERROR("%d plain text expected but only %d received\n",
891                        desc->bd_nob, pt_nob);
892                 return -EFAULT;
893         }
894
895         /* if needed, clear up the rest unused iovs */
896         if (adj_nob)
897                 while (i < desc->bd_iov_count)
898                         BD_GET_KIOV(desc, i++).kiov_len = 0;
899
900         /* decrypt tail (krb5 header) */
901         rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
902                                sizeof(*khdr));
903         if (rc != 0)
904                 return rc;
905
906         rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
907                                sizeof(*khdr));
908         if (rc != 0) {
909                 gss_teardown_sgtable(&sg_src);
910                 return rc;
911         }
912
913         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
914                                          sizeof(*khdr));
915
916         gss_teardown_sgtable(&sg_src);
917         gss_teardown_sgtable(&sg_dst);
918
919         if (rc) {
920                 CERROR("error to decrypt tail: %d\n", rc);
921                 return rc;
922         }
923
924         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
925                 CERROR("krb5 header doesn't match\n");
926                 return -EACCES;
927         }
928
929         return 0;
930 }
931
/*
 * Wrap (encrypt + integrity-protect) a message for the privacy service.
 *
 * \param gctx       GSS context; internal_ctx_id points at our krb5_ctx
 * \param gsshdr     GSS header: covered by the checksum below but NOT
 *                   encrypted
 * \param msg        clear message; padded in place (gss_add_padding) up to
 *                   the cipher blocksize
 * \param msg_buflen total buffer space behind msg->data, must leave room
 *                   for the padding
 * \param token      output token: | krb5 header | cipher text | checksum |
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE on any error.
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption (gss header is NOT encrypted):
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                /* arcfour-hmac: derive a per-token encryption key from the
                 * message checksum, and run it through a dedicated ecb(arc4)
                 * tfm allocated just for this token */
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while(0); /* just to avoid compile warning */
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum: last ke_hash_size bytes of cksum, appended
         * right behind the cipher text */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
1074
1075 static
1076 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1077                              struct ptlrpc_bulk_desc *desc)
1078 {
1079         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1080         int                  blocksize, i;
1081
1082         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1083         LASSERT(desc->bd_iov_count);
1084         LASSERT(GET_ENC_KIOV(desc));
1085         LASSERT(kctx->kc_keye.kb_tfm);
1086
1087         blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1088
1089         for (i = 0; i < desc->bd_iov_count; i++) {
1090                 LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
1091                 /*
1092                  * offset should always start at page boundary of either
1093                  * client or server side.
1094                  */
1095                 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
1096                         CERROR("odd offset %d in page %d\n",
1097                                BD_GET_KIOV(desc, i).kiov_offset, i);
1098                         return GSS_S_FAILURE;
1099                 }
1100
1101                 BD_GET_ENC_KIOV(desc, i).kiov_offset =
1102                         BD_GET_KIOV(desc, i).kiov_offset;
1103                 BD_GET_ENC_KIOV(desc, i).kiov_len =
1104                         (BD_GET_KIOV(desc, i).kiov_len +
1105                          blocksize - 1) & (~(blocksize - 1));
1106         }
1107
1108         return GSS_S_COMPLETE;
1109 }
1110
/*
 * Wrap a bulk descriptor for the privacy service.  The bulk pages are
 * encrypted in place into the descriptor's encrypted kiov's by
 * krb5_encrypt_bulk(); @token receives the krb5 header, the head/tail
 * cipher text (confounder + trailing krb5 header) and the checksum.
 *
 * \param adj_nob  passed through to krb5_encrypt_bulk() to allow it to
 *                 adjust the descriptor's byte counts
 *
 * \retval GSS_S_COMPLETE or GSS_S_FAILURE.
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int                  rc = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum over confounder + the clear bulk pages + khdr */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* head/tail cipher text lives in the token right after khdr */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG(); /* arcfour-hmac is never used for bulk */
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum: last ke_hash_size bytes of cksum go right
         * behind the head/tail cipher text */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
1222
1223 static
1224 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1225                           rawobj_t        *gsshdr,
1226                           rawobj_t        *token,
1227                           rawobj_t        *msg)
1228 {
1229         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1230         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1231         struct krb5_header  *khdr;
1232         unsigned char       *tmpbuf;
1233         int                  blocksize, bodysize;
1234         rawobj_t             cksum = RAWOBJ_EMPTY;
1235         rawobj_t             cipher_in, plain_out;
1236         rawobj_t             hash_objs[3];
1237         int                  rc = 0;
1238         __u32                major;
1239         __u8                 local_iv[16] = {0};
1240
1241         LASSERT(ke);
1242
1243         if (token->len < sizeof(*khdr)) {
1244                 CERROR("short signature: %u\n", token->len);
1245                 return GSS_S_DEFECTIVE_TOKEN;
1246         }
1247
1248         khdr = (struct krb5_header *)token->data;
1249
1250         major = verify_krb5_header(kctx, khdr, 1);
1251         if (major != GSS_S_COMPLETE) {
1252                 CERROR("bad krb5 header\n");
1253                 return major;
1254         }
1255
1256         /* block size */
1257         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1258                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1259                 blocksize = 1;
1260         } else {
1261                 LASSERT(kctx->kc_keye.kb_tfm);
1262                 blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1263         }
1264
1265         /* expected token layout:
1266          * ----------------------------------------
1267          * | krb5 header | cipher text | checksum |
1268          * ----------------------------------------
1269          */
1270         bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1271
1272         if (bodysize % blocksize) {
1273                 CERROR("odd bodysize %d\n", bodysize);
1274                 return GSS_S_DEFECTIVE_TOKEN;
1275         }
1276
1277         if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1278                 CERROR("incomplete token: bodysize %d\n", bodysize);
1279                 return GSS_S_DEFECTIVE_TOKEN;
1280         }
1281
1282         if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1283                 CERROR("buffer too small: %u, require %d\n",
1284                        msg->len, bodysize - ke->ke_conf_size);
1285                 return GSS_S_FAILURE;
1286         }
1287
1288         /* decrypting */
1289         OBD_ALLOC_LARGE(tmpbuf, bodysize);
1290         if (!tmpbuf)
1291                 return GSS_S_FAILURE;
1292
1293         major = GSS_S_FAILURE;
1294
1295         cipher_in.data = (__u8 *)(khdr + 1);
1296         cipher_in.len = bodysize;
1297         plain_out.data = tmpbuf;
1298         plain_out.len = bodysize;
1299
1300         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1301                 rawobj_t                 arc4_keye;
1302                 struct crypto_blkcipher *arc4_tfm;
1303
1304                 cksum.data = token->data + token->len - ke->ke_hash_size;
1305                 cksum.len = ke->ke_hash_size;
1306
1307                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1308                                        NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1309                         CERROR("failed to obtain arc4 enc key\n");
1310                         GOTO(arc4_out, rc = -EACCES);
1311                 }
1312
1313                 arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1314                 if (IS_ERR(arc4_tfm)) {
1315                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
1316                         GOTO(arc4_out_key, rc = -EACCES);
1317                 }
1318
1319                 if (crypto_blkcipher_setkey(arc4_tfm,
1320                                          arc4_keye.data, arc4_keye.len)) {
1321                         CERROR("failed to set arc4 key, len %d\n",
1322                                arc4_keye.len);
1323                         GOTO(arc4_out_tfm, rc = -EACCES);
1324                 }
1325
1326                 rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
1327                                        &plain_out, 0);
1328 arc4_out_tfm:
1329                 crypto_free_blkcipher(arc4_tfm);
1330 arc4_out_key:
1331                 rawobj_free(&arc4_keye);
1332 arc4_out:
1333                 cksum = RAWOBJ_EMPTY;
1334         } else {
1335                 rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
1336                                        &cipher_in, &plain_out, 0);
1337         }
1338
1339         if (rc != 0) {
1340                 CERROR("error decrypt\n");
1341                 goto out_free;
1342         }
1343         LASSERT(plain_out.len == bodysize);
1344
1345         /* expected clear text layout:
1346          * -----------------------------------------
1347          * | confounder | clear msgs | krb5 header |
1348          * -----------------------------------------
1349          */
1350
1351         /* verify krb5 header in token is not modified */
1352         if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1353                    sizeof(*khdr))) {
1354                 CERROR("decrypted krb5 header mismatch\n");
1355                 goto out_free;
1356         }
1357
1358         /* verify checksum, compose clear text as layout:
1359          * ------------------------------------------------------
1360          * | confounder | gss header | clear msgs | krb5 header |
1361          * ------------------------------------------------------
1362          */
1363         hash_objs[0].len = ke->ke_conf_size;
1364         hash_objs[0].data = plain_out.data;
1365         hash_objs[1].len = gsshdr->len;
1366         hash_objs[1].data = gsshdr->data;
1367         hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1368         hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1369         if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1370                                khdr, 3, hash_objs, 0, NULL, &cksum))
1371                 goto out_free;
1372
1373         LASSERT(cksum.len >= ke->ke_hash_size);
1374         if (memcmp((char *)(khdr + 1) + bodysize,
1375                    cksum.data + cksum.len - ke->ke_hash_size,
1376                    ke->ke_hash_size)) {
1377                 CERROR("checksum mismatch\n");
1378                 goto out_free;
1379         }
1380
1381         msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
1382         memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1383
1384         major = GSS_S_COMPLETE;
1385 out_free:
1386         OBD_FREE_LARGE(tmpbuf, bodysize);
1387         rawobj_free(&cksum);
1388         return major;
1389 }
1390
/*
 * Unwrap a bulk descriptor wrapped by gss_wrap_bulk_kerberos(): decrypt
 * the head/tail cipher text in @token and the bulk pages (via
 * krb5_decrypt_bulk()), then verify the trailing checksum.
 *
 * \retval GSS_S_COMPLETE, GSS_S_DEFECTIVE_TOKEN, GSS_S_FAILURE or
 *         GSS_S_BAD_SIG (checksum mismatch).
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG(); /* arcfour-hmac is never used for bulk */
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* plain aliases cipher: the head/tail text is decrypted in place */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* compare last ke_hash_size bytes of the computed checksum with
         * the one carried at the end of the head/tail cipher text */
        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1483
1484 int gss_display_kerberos(struct gss_ctx        *ctx,
1485                          char                  *buf,
1486                          int                    bufsize)
1487 {
1488         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1489         int                 written;
1490
1491         written = snprintf(buf, bufsize, "krb5 (%s)",
1492                            enctype2str(kctx->kc_enctype));
1493         return written;
1494 }
1495
/* method table glueing the generic GSS mechanism layer to the kerberos
 * implementations in this file */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1510
/* the four krb5 sub-flavors, mapping each sptlrpc service level
 * (null / auth / intg / priv) to its wire name; gm_sf_num below must
 * match the number of entries here */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1537
/* descriptor registered with the generic GSS layer */
static struct gss_api_mech gss_kerberos_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "krb5",
        /* DER encoding of the Kerberos V5 GSS-API mechanism OID
         * 1.2.840.113554.1.2.2 (see RFC 1964) */
        .gm_oid         = (rawobj_t)
                                {9, "\052\206\110\206\367\022\001\002\002"},
        .gm_ops         = &gss_kerberos_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_kerberos_sfs,
};
1547
1548 int __init init_kerberos_module(void)
1549 {
1550         int status;
1551
1552         spin_lock_init(&krb5_seq_lock);
1553
1554         status = lgss_mech_register(&gss_kerberos_mech);
1555         if (status)
1556                 CERROR("Failed to register kerberos gss mechanism!\n");
1557         return status;
1558 }
1559
/* module unload hook: unregister the krb5 mechanism from the GSS layer */
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}