Whamcloud - gitweb
LU-13098 ptlrpc: supress connection restored message
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_krb5_mech.c
1 /*
2  * Modifications for Lustre
3  *
4  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5  *
6  * Copyright (c) 2011, 2015, Intel Corporation.
7  *
8  * Author: Eric Mei <ericm@clusterfs.com>
9  */
10
11 /*
12  *  linux/net/sunrpc/gss_krb5_mech.c
13  *  linux/net/sunrpc/gss_krb5_crypto.c
14  *  linux/net/sunrpc/gss_krb5_seal.c
15  *  linux/net/sunrpc/gss_krb5_seqnum.c
16  *  linux/net/sunrpc/gss_krb5_unseal.c
17  *
18  *  Copyright (c) 2001 The Regents of the University of Michigan.
19  *  All rights reserved.
20  *
21  *  Andy Adamson <andros@umich.edu>
22  *  J. Bruce Fields <bfields@umich.edu>
23  *
24  *  Redistribution and use in source and binary forms, with or without
25  *  modification, are permitted provided that the following conditions
26  *  are met:
27  *
28  *  1. Redistributions of source code must retain the above copyright
29  *     notice, this list of conditions and the following disclaimer.
30  *  2. Redistributions in binary form must reproduce the above copyright
31  *     notice, this list of conditions and the following disclaimer in the
32  *     documentation and/or other materials provided with the distribution.
33  *  3. Neither the name of the University nor the names of its
34  *     contributors may be used to endorse or promote products derived
35  *     from this software without specific prior written permission.
36  *
37  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/slab.h>
56 #include <linux/crypto.h>
57 #include <linux/mutex.h>
58
59 #include <obd.h>
60 #include <obd_class.h>
61 #include <obd_support.h>
62 #include <lustre_net.h>
63 #include <lustre_import.h>
64 #include <lustre_sec.h>
65
66 #include "gss_err.h"
67 #include "gss_internal.h"
68 #include "gss_api.h"
69 #include "gss_asn1.h"
70 #include "gss_krb5.h"
71 #include "gss_crypto.h"
72
73 static DEFINE_SPINLOCK(krb5_seq_lock);
74
/*
 * Per-enctype parameters: the linux crypto tfm names to instantiate plus
 * the sizes the krb5 token format needs for this encryption type.
 */
struct krb5_enctype {
        char           *ke_dispname;            /* human-readable name */
        char           *ke_enc_name;            /* linux tfm name */
        char           *ke_hash_name;           /* linux tfm name */
        int             ke_enc_mode;            /* linux tfm mode */
        int             ke_hash_size;           /* checksum size */
        int             ke_conf_size;           /* confounder size */
        unsigned int    ke_hash_hmac:1;         /* is hmac? */
};
84
/*
 * Table of supported kerberos encryption types, indexed by ENCTYPE_* value.
 *
 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
 * encryption, but currently we simply use CBC with padding, because linux
 * doesn't support CTS yet.  This needs to be fixed in the future.
 */
static struct krb5_enctype enctypes[] = {
        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                .ke_dispname    = "des-cbc-md5",
                .ke_enc_name    = "cbc(des)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                /* ke_hash_hmac left 0: plain (keyless) md5 checksum */
        },
        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                .ke_dispname    = "des3-hmac-sha1",
                .ke_enc_name    = "cbc(des3_ede)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 20,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                .ke_dispname    = "aes128-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,           /* sha1-96: truncated digest */
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                .ke_dispname    = "aes256-cts-hmac-sha1-96",
                .ke_enc_name    = "cbc(aes)",
                .ke_hash_name   = "sha1",
                .ke_hash_size   = 12,           /* sha1-96: truncated digest */
                .ke_conf_size   = 16,
                .ke_hash_hmac   = 1,
        },
        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                .ke_dispname    = "arcfour-hmac-md5",
                .ke_enc_name    = "ecb(arc4)",
                .ke_hash_name   = "md5",
                .ke_hash_size   = 16,
                .ke_conf_size   = 8,
                .ke_hash_hmac   = 1,
        }
};
131
132 static const char * enctype2str(__u32 enctype)
133 {
134         if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
135                 return enctypes[enctype].ke_dispname;
136
137         return "unknown";
138 }
139
140 static
141 int krb5_init_keys(struct krb5_ctx *kctx)
142 {
143         struct krb5_enctype *ke;
144
145         if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
146             enctypes[kctx->kc_enctype].ke_hash_size == 0) {
147                 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
148                 return -1;
149         }
150
151         ke = &enctypes[kctx->kc_enctype];
152
153         /* tfm arc4 is stateful, user should alloc-use-free by his own */
154         if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
155             gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
156                 return -1;
157
158         /* tfm hmac is stateful, user should alloc-use-free by his own */
159         if (ke->ke_hash_hmac == 0 &&
160             gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
161                 return -1;
162         if (ke->ke_hash_hmac == 0 &&
163             gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
164                 return -1;
165
166         return 0;
167 }
168
169 static
170 void delete_context_kerberos(struct krb5_ctx *kctx)
171 {
172         rawobj_free(&kctx->kc_mech_used);
173
174         gss_keyblock_free(&kctx->kc_keye);
175         gss_keyblock_free(&kctx->kc_keyi);
176         gss_keyblock_free(&kctx->kc_keyc);
177 }
178
/*
 * Parse an old-style (version 0/1, rfc1964-layout) context blob handed
 * down from user space.  @p..@end delimits the remaining blob after the
 * version word.  On success the parsed fields land in @kctx and 0 is
 * returned; any malformed/short input returns GSS_S_FAILURE, and the
 * caller frees whatever was partially allocated via
 * delete_context_kerberos().
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now: consumed and discarded */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* seq send */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: its enctype and keysize must match the enc key's */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: the integrity key duplicates the seq key */
        if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* reject trailing garbage after the last field */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
257
/* Flags for version 2 context flags */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

/*
 * Parse a version 2 (rfc4121-layout) context blob handed down from user
 * space.  @p..@end delimits the remaining blob after the version word.
 * Returns 0 on success, GSS_S_FAILURE on malformed/short input; the
 * caller cleans up partially allocated keyblocks via
 * delete_context_kerberos().
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time. While kc_endtime might be 64 bit the krb5 API
         * still uses 32 bits. To delay the 2038 bug see the incoming
         * value as a u32 which give us until 2106. See the link for details:
         *
         * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
         */
        if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
                goto out_err;

        /* flags */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
            sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke - encryption key */
        if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki - integrity key */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc - checksum key */
        if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
325
/*
 * The whole purpose here is trying to keep user level gss context parsing
 * from nfs-utils unchanged as possible as we can, they are not quite mature
 * yet, and many stuff still not clear, like heimdal etc.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char *p = (char *)inbuf->data;
        char *end = (char *)(inbuf->data + inbuf->len);
        unsigned int tmp_uint, rc;

        /* the blob starts with a version word */
        if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* versions 0/1 use the rfc1964 layout, version 2 the rfc4121
         * layout; anything newer is rejected */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* for v0/v1 the version word doubles as the initiator flag */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        /* imports only copied raw key material; set up the cipher tfms */
        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                /* release whatever the partial import allocated */
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
375
376 static
377 __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
378                                         struct gss_ctx *gctx_new)
379 {
380         struct krb5_ctx *kctx = gctx->internal_ctx_id;
381         struct krb5_ctx *knew;
382
383         OBD_ALLOC_PTR(knew);
384         if (!knew)
385                 return GSS_S_FAILURE;
386
387         knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
388         knew->kc_cfx = kctx->kc_cfx;
389         knew->kc_seed_init = kctx->kc_seed_init;
390         knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
391         knew->kc_endtime = kctx->kc_endtime;
392
393         memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
394         knew->kc_seq_send = kctx->kc_seq_recv;
395         knew->kc_seq_recv = kctx->kc_seq_send;
396         knew->kc_enctype = kctx->kc_enctype;
397
398         if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
399                 goto out_err;
400
401         if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
402                 goto out_err;
403         if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
404                 goto out_err;
405         if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
406                 goto out_err;
407         if (krb5_init_keys(knew))
408                 goto out_err;
409
410         gctx_new->internal_ctx_id = knew;
411         CDEBUG(D_SEC, "successfully copied reverse context\n");
412         return GSS_S_COMPLETE;
413
414 out_err:
415         delete_context_kerberos(knew);
416         OBD_FREE_PTR(knew);
417         return GSS_S_FAILURE;
418 }
419
420 static
421 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
422                                    time64_t *endtime)
423 {
424         struct krb5_ctx *kctx = gctx->internal_ctx_id;
425
426         *endtime = kctx->kc_endtime;
427         return GSS_S_COMPLETE;
428 }
429
/* Destroy a context installed by the import/copy paths: free its
 * contents, then the structure itself. */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *ctx = internal_ctx;

        delete_context_kerberos(ctx);
        OBD_FREE_PTR(ctx);
}
438
/*
 * compute (keyed/keyless) checksum against the plain text which appended
 * with krb5 wire token header.
 *
 * \param enctype    index into enctypes[] selecting hash/cipher names
 * \param kb         keyblock supplying the hash key (and, for the
 *                   keyless-hash case, the cipher tfm used to encrypt
 *                   the digest)
 * \param khdr       krb5 token header, hashed first as a prefix
 * \param msgs/iovs  plaintext pieces fed to the hash after the header
 * \param cksum      out: digest buffer allocated here; on failure it may
 *                   hold a partial/zero-length buffer that the caller
 *                   must still rawobj_free()
 * \param hash_func  caller-supplied digest routine; required
 *
 * \retval GSS_S_COMPLETE or GSS_S_FAILURE
 */
static
__s32 krb5_make_checksum(__u32 enctype,
                         struct gss_keyblock *kb,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
                         rawobj_t *cksum,
                         digest_hash hash_func)
{
        struct krb5_enctype *ke = &enctypes[enctype];
        struct ahash_request *req = NULL;
        enum cfs_crypto_hash_alg hash_algo;
        rawobj_t hdr;
        int rc;

        hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);

        /* For the cbc(des) case we want md5 instead of hmac(md5): strcmp()
         * is non-zero for every enc name except "cbc(des)", so only des
         * gets the keyless (NULL key) hash init */
        if (strcmp(ke->ke_enc_name, "cbc(des)"))
                req = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
                                           kb->kb_key.len);
        else
                req = cfs_crypto_hash_init(hash_algo, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("failed to alloc hash %s : rc = %d\n",
                       ke->ke_hash_name, rc);
                goto out_no_hash;
        }

        cksum->len = cfs_crypto_hash_digestsize(hash_algo);
        OBD_ALLOC_LARGE(cksum->data, cksum->len);
        if (!cksum->data) {
                cksum->len = 0;
                rc = -ENOMEM;
                /* still goes through hash_final to release req */
                goto out_free_hash;
        }

        /* token header is hashed ahead of the message pieces */
        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);

        if (!hash_func) {
                rc = -EPROTO;
                CERROR("hash function for %s undefined\n",
                       ke->ke_hash_name);
                goto out_free_hash;
        }
        rc = hash_func(req, &hdr, msgcnt, msgs, iovcnt, iovs);
        if (rc)
                goto out_free_hash;

        if (!ke->ke_hash_hmac) {
                /* keyless digest: encrypt the digest in place with the
                 * keyblock's cipher tfm to key the checksum */
                LASSERT(kb->kb_tfm);

                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
                rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
                                       cksum->data, cksum->data,
                                       cksum->len);
                /* hash already finalized above; skip out_free_hash */
                goto out_no_hash;
        }

out_free_hash:
        /* finalize (and thereby release) the hash request if it exists;
         * on the ENOMEM path cksum->data is NULL and only the release
         * effect matters */
        if (req)
                cfs_crypto_hash_final(req, cksum->data, &cksum->len);
out_no_hash:
        return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
510
511 static void fill_krb5_header(struct krb5_ctx *kctx,
512                              struct krb5_header *khdr,
513                              int privacy)
514 {
515         unsigned char acceptor_flag;
516
517         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
518
519         if (privacy) {
520                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
521                 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
522                 khdr->kh_ec = cpu_to_be16(0);
523                 khdr->kh_rrc = cpu_to_be16(0);
524         } else {
525                 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
526                 khdr->kh_flags = acceptor_flag;
527                 khdr->kh_ec = cpu_to_be16(0xffff);
528                 khdr->kh_rrc = cpu_to_be16(0xffff);
529         }
530
531         khdr->kh_filler = 0xff;
532         spin_lock(&krb5_seq_lock);
533         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
534         spin_unlock(&krb5_seq_lock);
535 }
536
537 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
538                                 struct krb5_header *khdr,
539                                 int privacy)
540 {
541         unsigned char acceptor_flag;
542         __u16         tok_id, ec_rrc;
543
544         acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
545
546         if (privacy) {
547                 tok_id = KG_TOK_WRAP_MSG;
548                 ec_rrc = 0x0;
549         } else {
550                 tok_id = KG_TOK_MIC_MSG;
551                 ec_rrc = 0xffff;
552         }
553
554         /* sanity checks */
555         if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
556                 CERROR("bad token id\n");
557                 return GSS_S_DEFECTIVE_TOKEN;
558         }
559         if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
560                 CERROR("bad direction flag\n");
561                 return GSS_S_BAD_SIG;
562         }
563         if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
564                 CERROR("missing confidential flag\n");
565                 return GSS_S_BAD_SIG;
566         }
567         if (khdr->kh_filler != 0xff) {
568                 CERROR("bad filler\n");
569                 return GSS_S_DEFECTIVE_TOKEN;
570         }
571         if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
572             be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
573                 CERROR("bad EC or RRC\n");
574                 return GSS_S_DEFECTIVE_TOKEN;
575         }
576         return GSS_S_COMPLETE;
577 }
578
/*
 * Produce a MIC token over @msgs/@iovs into @token: a krb5 header
 * followed by the trailing ke_hash_size bytes of the checksum.  The
 * caller must provide a @token buffer of at least
 * sizeof(krb5_header) + ke_hash_size bytes; token->len is set to the
 * actual token size on success.
 */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        /* fill krb5 header at the front of the token buffer */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum covers the header plus all message pieces */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
                               msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* only the trailing ke_hash_size bytes of the digest go on the
         * wire, right after the header */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        /* cksum may hold a partial buffer even on failure */
        rawobj_free(&cksum);
        return major;
}
615
/*
 * Verify a MIC token produced by the peer over @msgs/@iovs: check the
 * header, recompute the checksum and compare it against the token's
 * trailing ke_hash_size bytes.
 */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                goto out;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                GOTO(out, major = GSS_S_FAILURE);
        }

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* compare against the truncated digest carried after the header.
         * NOTE(review): plain memcmp() is not constant-time; confirm a
         * timing side channel is not a concern for MIC verification here. */
        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
        }
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
out:
        return major;
}
666
/*
 * Encrypt a bulk descriptor's pages in place (source kiovs -> enc kiovs)
 * plus the confounder and krb5 header into @cipher, which must be
 * exactly blocksize + sizeof(*khdr) bytes (confounder block first, then
 * the encrypted header).  The same local_iv buffer is handed to every
 * crypto_blkcipher_encrypt_iv() call, so the CBC state is presumably
 * chained across confounder, pages and header (the blkcipher API
 * updating the IV in place - worth confirming).
 *
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size
 * (sum of the block-rounded page lengths).
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                    local_iv[16] = {0};
        struct scatterlist      src, dst;
        struct sg_table         sg_src, sg_dst;
        int                     blocksize, i, rc, nob = 0;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count);
        LASSERT(GET_ENC_KIOV(desc));

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder into the first blocksize bytes of cipher */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
                                         sg_src.sgl, blocksize);

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages: each source length is rounded up to a
         * whole number of cipher blocks, and the enc kiov is updated to
         * describe the ciphertext actually produced */
        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
                            (BD_GET_KIOV(desc, i).kiov_len +
                                blocksize - 1) &
                            (~(blocksize - 1)),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_init_table(&dst, 1);
                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            src.length, src.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header into cipher->data after the confounder */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
        if (rc != 0)
                return rc;

        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
                return rc;
        }

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));

        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);

        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
771
772 /*
773  * desc->bd_nob_transferred is the size of cipher text received.
774  * desc->bd_nob is the target size of plain text supposed to be.
775  *
776  * if adj_nob != 0, we adjust each page's kiov_len to the actual
777  * plain text size.
778  * - for client read: we don't know data size for each page, so
779  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
780  *   be smaller, so we need to adjust it according to
781  *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
782  *   this means we DO NOT support the situation that server send an odd size
783  *   data in a page which is not the last one.
 *  - for server write: we know exactly the data size expected for each page,
785  *   thus kiov_len is accurate already, so we should not adjust it at all.
786  *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
787  *   round_up(bd_iov[]->kiov_len) which
788  *   should have been done by prep_bulk().
789  */
790 static
791 int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
792                       struct krb5_header *khdr,
793                       struct ptlrpc_bulk_desc *desc,
794                       rawobj_t *cipher,
795                       rawobj_t *plain,
796                       int adj_nob)
797 {
798         struct blkcipher_desc   ciph_desc;
799         __u8                    local_iv[16] = {0};
800         struct scatterlist      src, dst;
801         struct sg_table         sg_src, sg_dst;
802         int                     ct_nob = 0, pt_nob = 0;
803         int                     blocksize, i, rc;
804
805         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
806         LASSERT(desc->bd_iov_count);
807         LASSERT(GET_ENC_KIOV(desc));
808         LASSERT(desc->bd_nob_transferred);
809
810         blocksize = crypto_blkcipher_blocksize(tfm);
811         LASSERT(blocksize > 1);
812         LASSERT(cipher->len == blocksize + sizeof(*khdr));
813
814         ciph_desc.tfm  = tfm;
815         ciph_desc.info = local_iv;
816         ciph_desc.flags = 0;
817
818         if (desc->bd_nob_transferred % blocksize) {
819                 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
820                 return -EPROTO;
821         }
822
823         /* decrypt head (confounder) */
824         rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
825         if (rc != 0)
826                 return rc;
827
828         rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
829         if (rc != 0) {
830                 gss_teardown_sgtable(&sg_src);
831                 return rc;
832         }
833
834         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
835                                          sg_src.sgl, blocksize);
836
837         gss_teardown_sgtable(&sg_dst);
838         gss_teardown_sgtable(&sg_src);
839
840         if (rc) {
841                 CERROR("error to decrypt confounder: %d\n", rc);
842                 return rc;
843         }
844
845         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
846              i++) {
847                 if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
848                     != 0 ||
849                     BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
850                     != 0) {
851                         CERROR("page %d: odd offset %u len %u, blocksize %d\n",
852                                i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
853                                BD_GET_ENC_KIOV(desc, i).kiov_len,
854                                blocksize);
855                         return -EFAULT;
856                 }
857
858                 if (adj_nob) {
859                         if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
860                             desc->bd_nob_transferred)
861                                 BD_GET_ENC_KIOV(desc, i).kiov_len =
862                                         desc->bd_nob_transferred - ct_nob;
863
864                         BD_GET_KIOV(desc, i).kiov_len =
865                           BD_GET_ENC_KIOV(desc, i).kiov_len;
866                         if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
867                             desc->bd_nob)
868                                 BD_GET_KIOV(desc, i).kiov_len =
869                                   desc->bd_nob - pt_nob;
870                 } else {
871                         /* this should be guaranteed by LNET */
872                         LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
873                                 kiov_len <=
874                                 desc->bd_nob_transferred);
875                         LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
876                                 BD_GET_ENC_KIOV(desc, i).kiov_len);
877                 }
878
879                 if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
880                         continue;
881
882                 sg_init_table(&src, 1);
883                 sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
884                             BD_GET_ENC_KIOV(desc, i).kiov_len,
885                             BD_GET_ENC_KIOV(desc, i).kiov_offset);
886                 dst = src;
887                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
888                         sg_assign_page(&dst,
889                                        BD_GET_KIOV(desc, i).kiov_page);
890
891                 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
892                                                  src.length);
893                 if (rc) {
894                         CERROR("error to decrypt page: %d\n", rc);
895                         return rc;
896                 }
897
898                 if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
899                         memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
900                                BD_GET_KIOV(desc, i).kiov_offset,
901                                page_address(BD_GET_ENC_KIOV(desc, i).
902                                             kiov_page) +
903                                BD_GET_KIOV(desc, i).kiov_offset,
904                                BD_GET_KIOV(desc, i).kiov_len);
905                 }
906
907                 ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
908                 pt_nob += BD_GET_KIOV(desc, i).kiov_len;
909         }
910
911         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
912                 CERROR("%d cipher text transferred but only %d decrypted\n",
913                        desc->bd_nob_transferred, ct_nob);
914                 return -EFAULT;
915         }
916
917         if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
918                 CERROR("%d plain text expected but only %d received\n",
919                        desc->bd_nob, pt_nob);
920                 return -EFAULT;
921         }
922
923         /* if needed, clear up the rest unused iovs */
924         if (adj_nob)
925                 while (i < desc->bd_iov_count)
926                         BD_GET_KIOV(desc, i++).kiov_len = 0;
927
928         /* decrypt tail (krb5 header) */
929         rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
930                                sizeof(*khdr));
931         if (rc != 0)
932                 return rc;
933
934         rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
935                                sizeof(*khdr));
936         if (rc != 0) {
937                 gss_teardown_sgtable(&sg_src);
938                 return rc;
939         }
940
941         rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
942                                          sizeof(*khdr));
943
944         gss_teardown_sgtable(&sg_src);
945         gss_teardown_sgtable(&sg_dst);
946
947         if (rc) {
948                 CERROR("error to decrypt tail: %d\n", rc);
949                 return rc;
950         }
951
952         if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
953                 CERROR("krb5 header doesn't match\n");
954                 return -EACCES;
955         }
956
957         return 0;
958 }
959
/*
 * gss_wrap_kerberos() - encrypt and integrity-protect a single message
 * (krb5p privacy service).
 *
 * Produces a token laid out as:
 *   | krb5 header | cipher text | checksum (ke_hash_size bytes) |
 * where the cipher text is the in-place encryption of
 * confounder + padded msg + a copy of the krb5 header, and the checksum
 * (keyed with kc_keyi) additionally covers the GSS header.
 *
 * \param gctx        GSS context; gctx->internal_ctx_id is the krb5 context
 * \param gsshdr      GSS header, covered by the checksum but not encrypted
 * \param msg         plain text; padded in place up to msg_buflen
 * \param msg_buflen  total buffer space available behind msg->data
 * \param token       in: token->len is the output capacity (asserted);
 *                    out: token->data/len hold the finished token
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE on any error.
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        u32 major;
        int                  rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum,
                               gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
                struct crypto_blkcipher *arc4_tfm;

                /* arcfour: the per-message encryption key is derived by
                 * checksumming the message checksum with kc_keyi */
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
                                       gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                            arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                /* safe on all paths: arc4_keye starts RAWOBJ_EMPTY */
                rawobj_free(&arc4_keye);
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
        }

        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1103
1104 static
1105 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1106                              struct ptlrpc_bulk_desc *desc)
1107 {
1108         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1109         int                  blocksize, i;
1110
1111         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
1112         LASSERT(desc->bd_iov_count);
1113         LASSERT(GET_ENC_KIOV(desc));
1114         LASSERT(kctx->kc_keye.kb_tfm);
1115
1116         blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1117
1118         for (i = 0; i < desc->bd_iov_count; i++) {
1119                 LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
1120                 /*
1121                  * offset should always start at page boundary of either
1122                  * client or server side.
1123                  */
1124                 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
1125                         CERROR("odd offset %d in page %d\n",
1126                                BD_GET_KIOV(desc, i).kiov_offset, i);
1127                         return GSS_S_FAILURE;
1128                 }
1129
1130                 BD_GET_ENC_KIOV(desc, i).kiov_offset =
1131                         BD_GET_KIOV(desc, i).kiov_offset;
1132                 BD_GET_ENC_KIOV(desc, i).kiov_len =
1133                         (BD_GET_KIOV(desc, i).kiov_len +
1134                          blocksize - 1) & (~(blocksize - 1));
1135         }
1136
1137         return GSS_S_COMPLETE;
1138 }
1139
/*
 * gss_wrap_bulk_kerberos() - encrypt and integrity-protect a bulk
 * transfer (privacy service).
 *
 * The bulk pages themselves are encrypted in place by
 * krb5_encrypt_bulk(); the token only carries the krb5 header, the
 * encrypted confounder + krb5-header trailer, and the checksum:
 *   | krb5 header | head/tail cipher text | checksum |
 * The checksum (keyed with kc_keyi) covers confounder + clear pages +
 * krb5 header.
 *
 * \param gctx     GSS context
 * \param desc     bulk descriptor whose kiov pages are encrypted in place
 * \param token    in: capacity (asserted); out: the finished token
 * \param adj_nob  passed through to krb5_encrypt_bulk to allow nob
 *                 adjustment of the kiovs
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE on any error.
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int rc = 0;
        u32 major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum over confounder + the (still clear) bulk pages */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
                               &cksum, gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *        |              |             |
         *        ----------  (cipher pages)   |
         * result token:   |                   |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* head (confounder) + tail (krb5 header) cipher text live in the
         * token, right after the clear krb5 header */
        cipher.data = (__u8 *)(khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour is not supported for bulk encryption */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);

        /* fill in checksum */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
out_free_cksum:
        rawobj_free(&cksum);
        return major;
}
1251
/*
 * gss_unwrap_kerberos() - decrypt and verify a privacy-protected message.
 *
 * Inverse of gss_wrap_kerberos(). Expects a token laid out as:
 *   | krb5 header | cipher text | checksum |
 * Decrypts into a temporary buffer, verifies that the embedded krb5
 * header copy matches the clear header, recomputes and compares the
 * checksum (over confounder + gss header + clear msg + krb5 header),
 * then copies the clear message out.
 *
 * \param gctx    GSS context
 * \param gsshdr  GSS header covered by the checksum
 * \param token   wrap token to verify and decrypt
 * \param msg     out: clear message; msg->len must be large enough on
 *                entry and is set to the actual message length on success
 *
 * \retval GSS_S_COMPLETE / GSS_S_DEFECTIVE_TOKEN / GSS_S_FAILURE
 */
static
__u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                          rawobj_t        *gsshdr,
                          rawobj_t        *token,
                          rawobj_t        *msg)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
        int                  blocksize, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
        int                  rc = 0;
        __u32                major;
        __u8                 local_iv[16] = {0};

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /* expected token layout:
         * ----------------------------------------
         * | krb5 header | cipher text | checksum |
         * ----------------------------------------
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;

        if (bodysize % blocksize) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
                CERROR("incomplete token: bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
                /* NOTE(review): the message prints the required size as
                 * bodysize - ke_conf_size, but the check above also
                 * subtracts sizeof(*khdr) — the printed value looks off
                 * by sizeof(*khdr); confirm and fix the format args */
                CERROR("buffer too small: %u, require %d\n",
                       msg->len, bodysize - ke->ke_conf_size);
                return GSS_S_FAILURE;
        }

        /* decrypting */
        OBD_ALLOC_LARGE(tmpbuf, bodysize);
        if (!tmpbuf)
                return GSS_S_FAILURE;

        major = GSS_S_FAILURE;

        cipher_in.data = (__u8 *)(khdr + 1);
        cipher_in.len = bodysize;
        plain_out.data = tmpbuf;
        plain_out.len = bodysize;

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* NOTE(review): unlike gss_wrap_kerberos(), arc4_keye is
                 * not initialized to RAWOBJ_EMPTY here; safe with the
                 * current goto targets, but fragile — confirm */
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                /* cksum temporarily points into the token so it can seed
                 * the derivation of the per-message arc4 key */
                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
                                       gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm,
                                            arc4_keye.data, arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                /* cksum pointed into the token, not an allocation; reset
                 * so the final rawobj_free() below is a no-op */
                cksum = RAWOBJ_EMPTY;
        } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
                                       &cipher_in, &plain_out, 0);
        }

        if (rc != 0) {
                CERROR("error decrypt\n");
                goto out_free;
        }
        LASSERT(plain_out.len == bodysize);

        /* expected clear text layout:
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */

        /* verify krb5 header in token is not modified */
        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
                   sizeof(*khdr))) {
                CERROR("decrypted krb5 header mismatch\n");
                goto out_free;
        }

        /* verify checksum, compose clear text as layout:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        hash_objs[0].len = ke->ke_conf_size;
        hash_objs[0].data = plain_out.data;
        hash_objs[1].len = gsshdr->len;
        hash_objs[1].data = gsshdr->data;
        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, hash_objs, 0, NULL, &cksum,
                               gctx->hash_func))
                goto out_free;

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp((char *)(khdr + 1) + bodysize,
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                goto out_free;
        }

        msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(tmpbuf, bodysize);
        rawobj_free(&cksum);
        return major;
}
1421
/*
 * gss_unwrap_bulk_kerberos() - decrypt and verify a privacy-protected
 * bulk transfer.
 *
 * Inverse of gss_wrap_bulk_kerberos(). The bulk pages are decrypted in
 * place by krb5_decrypt_bulk(); the token contributes the krb5 header,
 * the head/tail cipher text (confounder + krb5-header copy) and the
 * checksum, which is recomputed over confounder + clear pages + header.
 *
 * \param gctx     GSS context
 * \param desc     bulk descriptor; kiov pages decrypted in place
 * \param token    wrap token to verify
 * \param adj_nob  passed through to krb5_decrypt_bulk to allow nob
 *                 adjustment of the kiovs
 *
 * \retval GSS_S_COMPLETE / GSS_S_DEFECTIVE_TOKEN / GSS_S_FAILURE /
 *         GSS_S_BAD_SIG
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int                  blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int                  rc;
        __u32                major;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *)token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour is not supported for bulk; must not get here */
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* decrypt the head/tail cipher text in place inside the token */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
                               &cksum, gctx->hash_func))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* compare against the checksum stored at the end of the token */
        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1514
1515 int gss_display_kerberos(struct gss_ctx        *ctx,
1516                          char                  *buf,
1517                          int                    bufsize)
1518 {
1519         struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1520         int                 written;
1521
1522         written = snprintf(buf, bufsize, "krb5 (%s)",
1523                            enctype2str(kctx->kc_enctype));
1524         return written;
1525 }
1526
/* Operations vector plugging the kerberos 5 implementations above into
 * the generic GSS mechanism interface. */
static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context     = gss_import_sec_context_kerberos,
        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
        .gss_inquire_context        = gss_inquire_context_kerberos,
        .gss_get_mic                = gss_get_mic_kerberos,
        .gss_verify_mic             = gss_verify_mic_kerberos,
        .gss_wrap                   = gss_wrap_kerberos,
        .gss_unwrap                 = gss_unwrap_kerberos,
        .gss_prep_bulk              = gss_prep_bulk_kerberos,
        .gss_wrap_bulk              = gss_wrap_bulk_kerberos,
        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
        .gss_display                = gss_display_kerberos,
};
1541
/* sptlrpc subflavors offered by the krb5 mechanism, one per protection
 * level: null, auth (integrity of header), integrity, privacy. */
static struct subflavor_desc gss_kerberos_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "krb5n"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "krb5a"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "krb5i"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "krb5p"
        },
};
1568
1569 static struct gss_api_mech gss_kerberos_mech = {
1570         /* .gm_owner uses default NULL value for THIS_MODULE */
1571         .gm_name        = "krb5",
1572         .gm_oid         = (rawobj_t)
1573                                 {9, "\052\206\110\206\367\022\001\002\002"},
1574         .gm_ops         = &gss_kerberos_ops,
1575         .gm_sf_num      = 4,
1576         .gm_sfs         = gss_kerberos_sfs,
1577 };
1578
1579 int __init init_kerberos_module(void)
1580 {
1581         int status;
1582
1583         status = lgss_mech_register(&gss_kerberos_mech);
1584         if (status)
1585                 CERROR("Failed to register kerberos gss mechanism!\n");
1586         return status;
1587 }
1588
/* Unregister the kerberos mechanism at module unload. */
void cleanup_kerberos_module(void)
{
        lgss_mech_unregister(&gss_kerberos_mech);
}