Whamcloud - gitweb
New release 2.12.7
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, 2015, Intel Corporation.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/slab.h>
55 #include <linux/crypto.h>
56 #include <linux/mutex.h>
57
58 #include <obd.h>
59 #include <obd_class.h>
60 #include <obd_support.h>
61 #include <lustre_net.h>
62 #include <lustre_import.h>
63 #include <lustre_sec.h>
64
65 #include "gss_err.h"
66 #include "gss_internal.h"
67 #include "gss_api.h"
68 #include "gss_asn1.h"
69 #include "gss_krb5.h"
70 #include "gss_crypto.h"
71
72 static spinlock_t krb5_seq_lock;
73
/*
 * Per-enctype parameters: the linux crypto tfm names and the wire sizes
 * used by this krb5 mechanism.  Instances live in the enctypes[] table
 * below, indexed by the krb5 ENCTYPE_* wire value.
 */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable enctype name */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
83
/*
 * NOTE: for aes128-cts and aes256-cts, MIT implementation use CTS encryption.
 * but currently we simply CBC with padding, because linux doesn't support CTS
 * yet. this need to be fixed in the future.
 *
 * Sparse table indexed by the krb5 ENCTYPE_* value; zeroed holes denote
 * unsupported enctypes (ke_hash_size == 0) and are rejected by
 * krb5_init_keys().
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
        },
#ifdef HAVE_DES3_SUPPORT
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
#endif
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,           /* truncated sha1, per -96 */
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,           /* truncated sha1, per -96 */
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};
132
133 static const char * enctype2str(__u32 enctype)
134 {
135         if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
136                 return enctypes[enctype].ke_dispname;
137
138         return "unknown";
139 }
140
141 static
142 int krb5_init_keys(struct krb5_ctx *kctx)
143 {
144         struct krb5_enctype *ke;
145
146         if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
147             enctypes[kctx->kc_enctype].ke_hash_size == 0) {
148                 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
149                 return -1;
150         }
151
152         ke = &enctypes[kctx->kc_enctype];
153
154         /* tfm arc4 is stateful, user should alloc-use-free by his own */
155         if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
156             gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
157                 return -1;
158
159         /* tfm hmac is stateful, user should alloc-use-free by his own */
160         if (ke->ke_hash_hmac == 0 &&
161             gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
162                 return -1;
163         if (ke->ke_hash_hmac == 0 &&
164             gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
165                 return -1;
166
167         return 0;
168 }
169
/*
 * Release everything a krb5 context owns: the copied mech OID and the
 * three keyblocks (key data plus any allocated tfms).  The kctx struct
 * itself is freed by the caller.
 */
static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        gss_keyblock_free(&kctx->kc_keye);
        gss_keyblock_free(&kctx->kc_keyi);
        gss_keyblock_free(&kctx->kc_keyc);
}
179
/*
 * Import a version 0/1 (rfc1964-style) on-wire context blob as written
 * by the userspace gss daemon.  The blob must be consumed exactly: any
 * short read or trailing bytes is an error.  On failure partially-read
 * fields may be populated; the caller frees them via
 * delete_context_kerberos().  Returns 0 or GSS_S_FAILURE.
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* seq send (32-bit on the wire, widened into kc_seq_send) */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: must use the same enctype and key size as the enc key */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: reuse the checksum key as the integrity key */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* the blob must have been consumed exactly */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
258
259 /* Flags for version 2 context flags */
260 #define KRB5_CTX_FLAG_INITIATOR         0x00000001
261 #define KRB5_CTX_FLAG_CFX               0x00000002
262 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004
263
/*
 * Import a version 2 (rfc4121-style) on-wire context blob as written by
 * the userspace gss daemon.  Unlike the rfc1964 layout this carries an
 * explicit flags word and three separate keys (Ke/Ki/Kc).  On failure
 * partially-read fields may be populated; the caller frees them via
 * delete_context_kerberos().  Returns 0 or GSS_S_FAILURE.
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
326
/*
 * The whole purpose here is trying to keep user level gss context parsing
 * from nfs-utils unchanged as possible as we can, they are not quite mature
 * yet, and many stuff still not clear, like heimdal etc.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        /* the blob starts with a format-version word */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* versions 0/1 use the rfc1964-style layout, 2 is rfc4121 */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* in the old layout the version word doubles as the
                 * initiator flag */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        /* allocate the crypto tfms backing the imported keys */
        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                /* frees whatever the failed import partially filled in */
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
376
/*
 * Build the reverse-direction context from an existing one: same keys
 * and parameters, but with the initiator role inverted and the send /
 * receive sequence numbers swapped.  On success gctx_new owns the new
 * context; on failure everything allocated here is released.
 */
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        /* the reverse context speaks in the opposite direction */
        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        /* swap send/recv sequence numbers for the reversed direction */
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        /* duplicate key material, then allocate fresh tfms for it */
        if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
420
/*
 * Report the context expiry time.  kc_endtime was imported from a
 * 32-bit wire value (see import_context_*), widened here to time64_t.
 */
static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   time64_t *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = kctx->kc_endtime;
        return GSS_S_COMPLETE;
}
430
/*
 * GSS-API destructor: free the krb5 context's contents and the context
 * struct itself.
 */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}
439
/*
 * compute (keyed/keyless) checksum against the plain text which appended
 * with krb5 wire token header.
 *
 * On success *cksum holds the full digest of the selected hash (may be
 * longer than the enctype's on-wire ke_hash_size; callers truncate).
 * The digest buffer is OBD_ALLOC_LARGE'd here and the caller must
 * rawobj_free() it.  For keyless hashes (ke_hash_hmac == 0) the digest
 * is additionally encrypted in place with kb->kb_tfm.
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum,
                         digest_hash hash_func)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct ahash_request *req = NULL;
        enum cfs_crypto_hash_alg hash_algo;
        rawobj_t hdr;
        int rc;

        hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);

        /* For the cbc(des) case we want md5 instead of hmac(md5) */
        if (strcmp(ke->ke_enc_name, "cbc(des)"))
                req = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
                                           kb->kb_key.len);
        else
                req = cfs_crypto_hash_init(hash_algo, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("failed to alloc hash %s : rc = %d\n",
                       ke->ke_hash_name, rc);
                goto out_no_hash;
        }

        cksum->len = cfs_crypto_hash_digestsize(hash_algo);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                rc = -ENOMEM;
                goto out_free_hash;
        }

        /* the token header itself is covered by the checksum */
        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (!hash_func) {
                rc = -EPROTO;
                CERROR("hash function for %s undefined\n",
                       ke->ke_hash_name);
                goto out_free_hash;
        }
        rc = hash_func(req, &hdr, msgcnt, msgs, iovcnt, iovs);
        if (rc)
                goto out_free_hash;

        if (!ke->ke_hash_hmac) {
                /* keyless hash: finalize, then encrypt the digest in
                 * place with the keyblock's tfm */
                LASSERT(kb->kb_tfm);

                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
                rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
                                       cksum->data, cksum->data,
                                       cksum->len);
                goto out_no_hash;
        }

        /* hmac path falls through: finalize below */
out_free_hash:
        if (req)
                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
out_no_hash:
        return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
511
512 static void fill_krb5_header(struct krb5_ctx *kctx,
513                              struct krb5_header *khdr,
514                              int privacy)
515 {
516         unsigned char acceptor_flag;
517
518         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
519
520         if (privacy) {
521                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
522                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
523                 khdr->kh_ec = cpu_to_be16(0);
524                 khdr->kh_rrc = cpu_to_be16(0);
525         } else {
526                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
527                 khdr->kh_flags = acceptor_flag;
528                 khdr->kh_ec = cpu_to_be16(0xffff);
529                 khdr->kh_rrc = cpu_to_be16(0xffff);
530         }
531
532         khdr->kh_filler = 0xff;
533         spin_lock(&krb5_seq_lock);
534         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
535         spin_unlock(&krb5_seq_lock);
536 }
537
538 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
539                                 struct krb5_header *khdr,
540                                 int privacy)
541 {
542         unsigned char acceptor_flag;
543         __u16         tok_id, ec_rrc;
544
545         acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
546
547         if (privacy) {
548                 tok_id = KG_TOK_WRAP_MSG;
549                 ec_rrc = 0x0;
550         } else {
551                 tok_id = KG_TOK_MIC_MSG;
552                 ec_rrc = 0xffff;
553         }
554
555         /* sanity checks */
556         if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
557                 CERROR("bad token id\n");
558                 return GSS_S_DEFECTIVE_TOKEN;
559         }
560         if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
561                 CERROR("bad direction flag\n");
562                 return GSS_S_BAD_SIG;
563         }
564         if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
565                 CERROR("missing confidential flag\n");
566                 return GSS_S_BAD_SIG;
567         }
568         if (khdr->kh_filler != 0xff) {
569                 CERROR("bad filler\n");
570                 return GSS_S_DEFECTIVE_TOKEN;
571         }
572         if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
573             be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
574                 CERROR("bad EC or RRC\n");
575                 return GSS_S_DEFECTIVE_TOKEN;
576         }
577         return GSS_S_COMPLETE;
578 }
579
/*
 * Generate a MIC token over msgs/iovs: the krb5 header followed by the
 * trailing ke_hash_size bytes of the keyed checksum (kc_keyc) computed
 * over header + payload.  token->len must hold the available buffer
 * size on entry and is set to the actual token length on success.
 */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
                               msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* the digest may be longer than what goes on the wire (e.g. the
         * aes enctypes use only the 12 trailing bytes of sha1) */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
616
/*
 * Verify a MIC token produced by gss_get_mic_kerberos(): check the
 * header, recompute the keyed checksum over header + payload, and
 * compare its trailing ke_hash_size bytes against the token.
 */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                goto out;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                GOTO(out, major = GSS_S_FAILURE);
        }

        /* recompute the checksum with our copy of Kc */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* compare only the trailing ke_hash_size bytes of the digest */
        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
        }
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
out:
        return major;
}
667
/*
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 *
 * Encrypts a bulk descriptor for a privacy-mode RPC:
 *   1. the confounder into cipher->data[0 .. blocksize),
 *   2. each bulk page (rounded up to blocksize) into the matching
 *      enc kiov page,
 *   3. the krb5 header into cipher->data[blocksize ..).
 * cipher->len must be exactly blocksize + sizeof(*khdr) on entry.
 * Returns 0 or a negative errno.
 */
static
int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        __u8 local_iv[16] = {0};
        struct scatterlist src, dst;
        struct sg_table sg_src, sg_dst;
        int blocksize, i, rc, nob = 0;
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_sync_skcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }
        skcipher_request_set_sync_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
                                   blocksize, local_iv);

        rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                skcipher_request_zero(req);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                /* round each fragment length up to the cipher blocksize */
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                /* record the actual ciphertext geometry for the wire */
                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                skcipher_request_set_crypt(req, &src, &dst,
                                          src.length, local_iv);
                rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        skcipher_request_zero(req);
                        return rc;
                }
        }

        /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0) {
                skcipher_request_zero(req);
                return rc;
        }

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                skcipher_request_zero(req);
                return rc;
        }

        skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
                                   sizeof(*khdr), local_iv);
        rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl,
                                        sizeof(*khdr));
        skcipher_request_zero(req);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
780
781 /*
782  * desc->bd_nob_transferred is the size of cipher text received.
783  * desc->bd_nob is the target size of plain text supposed to be.
784  *
785  * if adj_nob != 0, we adjust each page's kiov_len to the actual
786  * plain text size.
787  * - for client read: we don't know data size for each page, so
788  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
789  *   be smaller, so we need to adjust it according to
790  *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
791  *   this means we DO NOT support the situation that server send an odd size
792  *   data in a page which is not the last one.
793  * - for server write: we knows exactly data size for each page being expected,
794  *   thus kiov_len is accurate already, so we should not adjust it at all.
795  *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
796  *   round_up(bd_iov[]->kiov_len) which
797  *   should have been done by prep_bulk().
798  */
799 static
800 int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm,
801                       struct krb5_header *khdr,
802                       struct ptlrpc_bulk_desc *desc,
803                       rawobj_t *cipher,
804                       rawobj_t *plain,
805                       int adj_nob)
806 {
807         __u8 local_iv[16] = {0};
808         struct scatterlist src, dst;
809         struct sg_table sg_src, sg_dst;
810         int ct_nob = 0, pt_nob = 0;
811         int blocksize, i, rc;
812         SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
813
814         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
815         LASSERT(desc->bd_iov_count);
816         LASSERT(GET_ENC_KIOV(desc));
817         LASSERT(desc->bd_nob_transferred);
818
819         blocksize = crypto_sync_skcipher_blocksize(tfm);
820         LASSERT(blocksize > 1);
821         LASSERT(cipher->len == blocksize + sizeof(*khdr));
822
823         if (desc->bd_nob_transferred % blocksize) {
824                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
825                 return -EPROTO;
826         }
827
828         /* decrypt head (confounder) */
829         rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
830         if (rc != 0)
831                 return rc;
832
833         rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
834         if (rc != 0) {
835                 gss_teardown_sgtable(&sg_src);
836                 return rc;
837         }
838
839         skcipher_request_set_sync_tfm(req, tfm);
840         skcipher_request_set_callback(req, 0, NULL, NULL);
841         skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
842                                    blocksize, local_iv);
843
844         rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
845
846         gss_teardown_sgtable(&sg_dst);
847         gss_teardown_sgtable(&sg_src);
848
849         if (rc) {
850                 CERROR("error to decrypt confounder: %d\n", rc);
851                 skcipher_request_zero(req);
852                 return rc;
853         }
854
855         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
856              i++) {
857                 if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize != 0 ||
858                     BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize != 0) {
859                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
860                                i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
861                                BD_GET_ENC_KIOV(desc, i).kiov_len,
862                                blocksize);
863                         skcipher_request_zero(req);
864                         return -EFAULT;
865                 }
866
867                 if (adj_nob) {
868                         if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
869                             desc->bd_nob_transferred)
870                                 BD_GET_ENC_KIOV(desc, i).kiov_len =
871                                         desc->bd_nob_transferred - ct_nob;
872
873                         BD_GET_KIOV(desc, i).kiov_len =
874                           BD_GET_ENC_KIOV(desc, i).kiov_len;
875                         if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
876                             desc->bd_nob)
877                                 BD_GET_KIOV(desc, i).kiov_len =
878                                   desc->bd_nob - pt_nob;
879                 } else {
880                         /* this should be guaranteed by LNET */
881                         LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
882                                 kiov_len <=
883                                 desc->bd_nob_transferred);
884                         LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
885                                 BD_GET_ENC_KIOV(desc, i).kiov_len);
886                 }
887
888                 if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
889                         continue;
890
891                 sg_init_table(&src, 1);
892                 sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
893                             BD_GET_ENC_KIOV(desc, i).kiov_len,
894                             BD_GET_ENC_KIOV(desc, i).kiov_offset);
895                 dst = src;
896                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
897                         sg_assign_page(&dst,
898                                        BD_GET_KIOV(desc, i).kiov_page);
899
900                 skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
901                                            src.length, local_iv);
902                 rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length);
903                 if (rc) {
904                         CERROR("error to decrypt page: %d\n", rc);
905                         skcipher_request_zero(req);
906                         return rc;
907                 }
908
909                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
910                         memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
911                                BD_GET_KIOV(desc, i).kiov_offset,
912                                page_address(BD_GET_ENC_KIOV(desc, i).
913                                             kiov_page) +
914                                BD_GET_KIOV(desc, i).kiov_offset,
915                                BD_GET_KIOV(desc, i).kiov_len);
916                 }
917
918                 ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
919                 pt_nob += BD_GET_KIOV(desc, i).kiov_len;
920         }
921
922         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
923                 CERROR("%d cipher text transferred but only %d decrypted\n",
924                        desc->bd_nob_transferred, ct_nob);
925                 skcipher_request_zero(req);
926                 return -EFAULT;
927         }
928
929         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
930                 CERROR("%d plain text expected but only %d received\n",
931                        desc->bd_nob, pt_nob);
932                 skcipher_request_zero(req);
933                 return -EFAULT;
934         }
935
936         /* if needed, clear up the rest unused iovs */
937         if (adj_nob)
938                 while (i < desc->bd_iov_count)
939                         BD_GET_KIOV(desc, i++).kiov_len = 0;
940
941         /* decrypt tail (krb5 header) */
942         rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
943                                sizeof(*khdr));
944         if (rc != 0)
945                 return rc;
946
947         rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
948                                sizeof(*khdr));
949         if (rc != 0) {
950                 gss_teardown_sgtable(&sg_src);
951                 return rc;
952         }
953
954         skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
955                                   src.length, local_iv);
956         rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl,
957                                         sizeof(*khdr));
958         gss_teardown_sgtable(&sg_src);
959         gss_teardown_sgtable(&sg_dst);
960
961         skcipher_request_zero(req);
962         if (rc) {
963                 CERROR("error to decrypt tail: %d\n", rc);
964                 return rc;
965         }
966
967         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
968                 CERROR("krb5 header doesn't match\n");
969                 return -EACCES;
970         }
971
972         return 0;
973 }
974
/*
 * Wrap (encrypt and integrity-protect) a single RPC message.
 *
 * final token format:
 * ---------------------------------------------------
 * | krb5 header | cipher text | checksum (16 bytes) |
 * ---------------------------------------------------
 *
 * \param gctx       GSS context holding the krb5 context and hash helper
 * \param gsshdr     gss wire header; covered by the checksum but not
 *                   part of the encrypted payload
 * \param msg        clear message; padded in place up to the cipher
 *                   block size, so its buffer must hold msg_buflen bytes
 * \param msg_buflen total capacity of msg->data available for padding
 * \param token      output buffer; token->len is updated to the final
 *                   token size on success
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        u32 major;
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_sync_skcipher_blocksize(
                                                        kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption (note the gss header is NOT
         * encrypted, only checksummed above):
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
                struct crypto_sync_skcipher *arc4_tfm;

                /* arcfour-hmac derives a per-message encryption key from
                 * the checksum, using a throwaway ecb(arc4) tfm */
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
                                       gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_sync_skcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum (the trailing ke_hash_size bytes of cksum) */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1119
1120 static
1121 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1122                              struct ptlrpc_bulk_desc *desc)
1123 {
1124         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1125         int                  blocksize, i;
1126
1127         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1128         LASSERT(desc->bd_iov_count);
1129         LASSERT(GET_ENC_KIOV(desc));
1130         LASSERT(kctx->kc_keye.kb_tfm);
1131
1132         blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
1133
1134         for (i = 0; i < desc->bd_iov_count; i++) {
1135                 LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
1136                 /*
1137                  * offset should always start at page boundary of either
1138                  * client or server side.
1139                  */
1140                 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
1141                         CERROR("odd offset %d in page %d\n",
1142                                BD_GET_KIOV(desc, i).kiov_offset, i);
1143                         return GSS_S_FAILURE;
1144                 }
1145
1146                 BD_GET_ENC_KIOV(desc, i).kiov_offset =
1147                         BD_GET_KIOV(desc, i).kiov_offset;
1148                 BD_GET_ENC_KIOV(desc, i).kiov_len =
1149                         (BD_GET_KIOV(desc, i).kiov_len +
1150                          blocksize - 1) & (~(blocksize - 1));
1151         }
1152
1153         return GSS_S_COMPLETE;
1154 }
1155
/*
 * Wrap a bulk transfer in privacy mode: checksum confounder + clear
 * pages + krb5 header, encrypt the pages in place via
 * krb5_encrypt_bulk(), and build the head/tail token.
 *
 * final token format:
 * --------------------------------------------------
 * | krb5 header | head/tail cipher text | checksum |
 * --------------------------------------------------
 *
 * \param gctx     GSS context holding the krb5 context
 * \param desc     bulk descriptor with clear pages and enc kiov pages
 * \param token    output buffer; token->len set to final size on success
 * \param adj_nob  passed through to krb5_encrypt_bulk() to adjust
 *                 kiov_len values
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int rc = 0;
        u32 major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksz <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         * (the clear pages themselves are passed separately as kiovs
         * via krb5_make_checksum's iov arguments)
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum, gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* head/tail cipher text lands just after the krb5 header */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksz + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac is never used for bulk transfers */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1267
/*
 * Unwrap (decrypt and verify) a single RPC message produced by
 * gss_wrap_kerberos().
 *
 * expected token layout:
 * ----------------------------------------
 * | krb5 header | cipher text | checksum |
 * ----------------------------------------
 *
 * \param gctx    GSS context holding the krb5 context and hash helper
 * \param gsshdr  gss wire header, included in checksum verification
 * \param token   wrap token to unwrap
 * \param msg     output buffer; msg->len is set to the clear message
 *                size on success
 *
 * \retval GSS_S_COMPLETE on success; GSS_S_DEFECTIVE_TOKEN for a
 *         malformed token, GSS_S_FAILURE for decrypt/checksum problems
 */
static
__u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                          rawobj_t        *gsshdr,
                          rawobj_t        *token,
                          rawobj_t        *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksz, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;
        __u8                 local_iv[16] = {0};

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksz) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                /* NOTE(review): the "require" value printed here omits
                 * sizeof(*khdr), unlike the check above — message is
                 * slightly pessimistic but behavior is correct */
                CERROR("buffer too small: %u, require %d\n",
                       msg->len, bodysize - ke->ke_conf_size);
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *)(khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
                struct crypto_sync_skcipher *arc4_tfm;

                /* per-message arc4 key is derived from the token's own
                 * trailing checksum */
                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
                                       gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
arc4_out_tfm:
                crypto_free_sync_skcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                /* cksum aliased token data above; reset so the final
                 * rawobj_free() does not free token memory */
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
                                       &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("error decrypt\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify krb5 header in token is not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum,
                               gctx->hash_func))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        /* copy the clear message (between confounder and trailing krb5
         * header) out to the caller's buffer */
        msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}
1437
/*
 * Unwrap a bulk transfer wrapped by gss_wrap_bulk_kerberos(): decrypt
 * the head/tail cipher text and the bulk pages in place, then verify
 * the token's checksum over confounder + clear pages + krb5 header.
 *
 * token format is expected as:
 * -----------------------------------------------
 * | krb5 header | head/tail cipher text | cksum |
 * -----------------------------------------------
 *
 * \param gctx     GSS context holding the krb5 context
 * \param desc     bulk descriptor with cipher pages in the enc kiovs
 * \param token    wrap token covering the bulk transfer
 * \param adj_nob  passed through to krb5_decrypt_bulk()
 *
 * \retval GSS_S_COMPLETE on success; GSS_S_DEFECTIVE_TOKEN, GSS_S_FAILURE
 *         or GSS_S_BAD_SIG on failure
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size; arcfour-hmac is never used for bulk transfers */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksz = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* head/tail cipher text is decrypted in place */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksz + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         * (clear pages are passed as kiovs; the krb5 header is the
         * khdr argument to krb5_make_checksum)
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksz;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum, gctx->hash_func))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* compare against the checksum carried at the end of the token */
        if (memcmp(plain.data + blocksz + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1530
1531 int gss_display_kerberos(struct gss_ctx        *ctx,
1532                          char                  *buf,
1533                          int                    bufsize)
1534 {
1535         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1536         int                 written;
1537
1538         written = snprintf(buf, bufsize, "krb5 (%s)",
1539                            enctype2str(kctx->kc_enctype));
1540         return written;
1541 }
1542
/* Operations table for the kerberos 5 mechanism: maps each generic
 * gss_api callback onto its krb5 implementation in this file.  Hooked
 * into the framework via gss_kerberos_mech below. */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1557
/* Subflavors exported by the krb5 mechanism, one per sptlrpc service
 * level: null, auth (integrity on header only), integrity, privacy. */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                /* authentication only, no per-message protection */
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                /* integrity: messages are checksummed */
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                /* privacy: messages are encrypted and checksummed */
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1584
1585 static struct gss_api_mech gss_kerberos_mech = {
1586         /* .gm_owner uses default NULL value for THIS_MODULE */
1587         .gm_name        = "krb5",
1588         .gm_oid         = (rawobj_t)
1589                                 {9, "\052\206\110\206\367\022\001\002\002"},
1590         .gm_ops         = &gss_kerberos_ops,
1591         .gm_sf_num      = 4,
1592         .gm_sfs         = gss_kerberos_sfs,
1593 };
1594
1595 int __init init_kerberos_module(void)
1596 {
1597         int status;
1598
1599         spin_lock_init(&krb5_seq_lock);
1600
1601         status = lgss_mech_register(&gss_kerberos_mech);
1602         if (status)
1603                 CERROR("Failed to register kerberos gss mechanism!\n");
1604         return status;
1605 }
1606
/* Module teardown: unregister the mechanism registered by
 * init_kerberos_module(). */
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}