Whamcloud - gitweb
LU-3289 gss: Fix for SK bulk HMACs
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_sk_mech.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (C) 2013, 2015, Trustees of Indiana University
24  *
25  * Copyright (c) 2014, Intel Corporation.
26  *
27  * Author: Jeremy Filizetti <jfilizet@iu.edu>
28  * Author: Andrew Korty <ajk@iu.edu>
29  */
30
31 #define DEBUG_SUBSYSTEM S_SEC
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/crypto.h>
36 #include <linux/mutex.h>
37
38 #include <obd.h>
39 #include <obd_class.h>
40 #include <obd_support.h>
41 #include <lustre/lustre_user.h>
42
43 #include "gss_err.h"
44 #include "gss_crypto.h"
45 #include "gss_internal.h"
46 #include "gss_api.h"
47 #include "gss_asn1.h"
48
49 #define SK_INTERFACE_VERSION 1
50 #define SK_MIN_SIZE 8
51
/* Kernel-side shared key security context, deserialized from the
 * userspace blob by fill_sk_context(). */
struct sk_ctx {
	__u32			sc_version;	/* NOTE(review): never written in
						 * this file — confirm intent */
	__u16			sc_hmac;	/* index into sk_hmac_types[] */
	__u16			sc_crypt;	/* index into sk_crypt_types[] */
	__u32			sc_expire;	/* absolute expiry time, seconds */
	rawobj_t		sc_shared_key;	/* pre-shared HMAC key */
	rawobj_t		sc_iv;		/* cipher IV; empty unless privacy */
	struct gss_keyblock	sc_session_kb;	/* session cipher key + tfm;
						 * only used in privacy mode */
};
61
/* Supported privacy-mode ciphers, indexed by the SK_CRYPT_* value read
 * from the serialized context. */
static struct sk_crypt_type sk_crypt_types[] = {
	[SK_CRYPT_AES256_CTR] = {
		.sct_name = "ctr(aes256)",	/* Linux crypto algorithm name */
		.sct_bytes = 32,		/* key length in bytes */
	},
};
68
/* Supported HMAC algorithms, indexed by the SK_HMAC_* value read from
 * the serialized context. */
static struct sk_hmac_type sk_hmac_types[] = {
	[SK_HMAC_SHA256] = {
		.sht_name = "hmac(sha256)",	/* Linux crypto algorithm name */
		.sht_bytes = 32,		/* digest length in bytes */
	},
	[SK_HMAC_SHA512] = {
		.sht_name = "hmac(sha512)",
		.sht_bytes = 64,
	},
};
79
/* Round @len up to the next multiple of @blocksize, which must be a
 * power of two. */
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	unsigned long mask = blocksize - 1;

	return (len + mask) & ~mask;
}
84
85 static int sk_init_keys(struct sk_ctx *skc)
86 {
87         int rc;
88         unsigned int ivsize;
89
90         rc = gss_keyblock_init(&skc->sc_session_kb,
91                                sk_crypt_types[skc->sc_crypt].sct_name, 0);
92         if (rc)
93                 return rc;
94
95         ivsize = crypto_blkcipher_ivsize(skc->sc_session_kb.kb_tfm);
96         if (skc->sc_iv.len != ivsize) {
97                 CERROR("IV size for algorithm (%d) does not match provided IV "
98                        "size: %d\n", ivsize, skc->sc_iv.len);
99                 return -EINVAL;
100         }
101
102         crypto_blkcipher_set_iv(skc->sc_session_kb.kb_tfm,
103                                        skc->sc_iv.data, skc->sc_iv.len);
104
105         return 0;
106 }
107
108 static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc)
109 {
110         char *ptr = inbuf->data;
111         char *end = inbuf->data + inbuf->len;
112         __u32 tmp;
113
114         /* see sk_serialize_kctx() for format from userspace side */
115         /*  1. Version */
116         if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
117                 CERROR("Failed to read shared key interface version");
118                 return -1;
119         }
120         if (tmp != SK_INTERFACE_VERSION) {
121                 CERROR("Invalid shared key interface version: %d\n", tmp);
122                 return -1;
123         }
124
125         /* 2. HMAC type */
126         if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
127                 CERROR("Failed to read HMAC algorithm type");
128                 return -1;
129         }
130         if (skc->sc_hmac >= SK_HMAC_MAX) {
131                 CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
132                 return -1;
133         }
134
135         /* 3. crypt type */
136         if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
137                 CERROR("Failed to read crypt algorithm type");
138                 return -1;
139         }
140         if (skc->sc_crypt >= SK_CRYPT_MAX) {
141                 CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
142                 return -1;
143         }
144
145         /* 4. expiration time */
146         if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
147                 CERROR("Failed to read context expiration time");
148                 return -1;
149         }
150         skc->sc_expire = tmp + cfs_time_current_sec();
151
152         /* 5. Shared key */
153         if (gss_get_rawobj(&ptr, end, &skc->sc_shared_key)) {
154                 CERROR("Failed to read shared key");
155                 return -1;
156         }
157         if (skc->sc_shared_key.len <= SK_MIN_SIZE) {
158                 CERROR("Shared key must key must be larger than %d bytes\n",
159                        SK_MIN_SIZE);
160                 return -1;
161         }
162
163         /* 6. IV, can be empty if not using privacy mode */
164         if (gss_get_rawobj(&ptr, end, &skc->sc_iv)) {
165                 CERROR("Failed to read initialization vector ");
166                 return -1;
167         }
168
169         /* 7. Session key, can be empty if not using privacy mode */
170         if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
171                 CERROR("Failed to read session key");
172                 return -1;
173         }
174
175         return 0;
176 }
177
178 static void delete_sk_context(struct sk_ctx *skc)
179 {
180         if (!skc)
181                 return;
182         gss_keyblock_free(&skc->sc_session_kb);
183         rawobj_free(&skc->sc_iv);
184         rawobj_free(&skc->sc_shared_key);
185 }
186
187 static
188 __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
189 {
190         struct sk_ctx *skc;
191         bool privacy = false;
192
193         if (inbuf == NULL || inbuf->data == NULL)
194                 return GSS_S_FAILURE;
195
196         OBD_ALLOC_PTR(skc);
197         if (!skc)
198                 return GSS_S_FAILURE;
199
200         if (fill_sk_context(inbuf, skc))
201                 goto out_error;
202
203         /* Only privacy mode needs to initialize keys */
204         if (skc->sc_session_kb.kb_key.len > 0) {
205                 privacy = true;
206                 if (sk_init_keys(skc))
207                         goto out_error;
208         }
209
210         gss_context->internal_ctx_id = skc;
211         CDEBUG(D_SEC, "successfully imported sk%s context\n",
212                privacy ? "pi" : "i");
213
214         return GSS_S_COMPLETE;
215
216 out_error:
217         delete_sk_context(skc);
218         OBD_FREE_PTR(skc);
219         return GSS_S_FAILURE;
220 }
221
222 static
223 __u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
224                                   struct gss_ctx *gss_context_new)
225 {
226         struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
227         struct sk_ctx *skc_new;
228
229         OBD_ALLOC_PTR(skc_new);
230         if (!skc_new)
231                 return GSS_S_FAILURE;
232
233         skc_new->sc_crypt = skc_old->sc_crypt;
234         skc_new->sc_hmac = skc_old->sc_hmac;
235         skc_new->sc_expire = skc_old->sc_expire;
236         if (rawobj_dup(&skc_new->sc_shared_key, &skc_old->sc_shared_key))
237                 goto out_error;
238         if (rawobj_dup(&skc_new->sc_iv, &skc_old->sc_iv))
239                 goto out_error;
240         if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
241                 goto out_error;
242
243         /* Only privacy mode needs to initialize keys */
244         if (skc_new->sc_session_kb.kb_key.len > 0)
245                 if (sk_init_keys(skc_new))
246                         goto out_error;
247
248         gss_context_new->internal_ctx_id = skc_new;
249         CDEBUG(D_SEC, "successfully copied reverse sk context\n");
250
251         return GSS_S_COMPLETE;
252
253 out_error:
254         delete_sk_context(skc_new);
255         OBD_FREE_PTR(skc_new);
256         return GSS_S_FAILURE;
257 }
258
259 static
260 __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
261                              unsigned long *endtime)
262 {
263         struct sk_ctx *skc = gss_context->internal_ctx_id;
264
265         *endtime = skc->sc_expire;
266         return GSS_S_COMPLETE;
267 }
268
269 static
270 __u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs,
271                    int iov_count, lnet_kiov_t *iovs, rawobj_t *token)
272 {
273         struct crypto_hash *tfm;
274         int rc;
275
276         tfm = crypto_alloc_hash(alg_name, 0, 0);
277         if (IS_ERR(tfm))
278                 return GSS_S_FAILURE;
279
280         rc = GSS_S_FAILURE;
281         LASSERT(token->len >= crypto_hash_digestsize(tfm));
282         if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
283                             token))
284                 rc = GSS_S_COMPLETE;
285
286         crypto_free_hash(tfm);
287         return rc;
288 }
289
290 static
291 __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
292                      int message_count,
293                      rawobj_t *messages,
294                      int iov_count,
295                      lnet_kiov_t *iovs,
296                      rawobj_t *token)
297 {
298         struct sk_ctx *skc = gss_context->internal_ctx_id;
299         return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name,
300                             &skc->sc_shared_key, message_count, messages,
301                             iov_count, iovs, token);
302 }
303
304 static
305 __u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count,
306                          rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
307                          rawobj_t *token)
308 {
309         rawobj_t checksum = RAWOBJ_EMPTY;
310         __u32 rc = GSS_S_FAILURE;
311
312         checksum.len = sht->sht_bytes;
313         if (token->len < checksum.len) {
314                 CDEBUG(D_SEC, "Token received too short, expected %d "
315                        "received %d\n", token->len, checksum.len);
316                 return GSS_S_DEFECTIVE_TOKEN;
317         }
318
319         OBD_ALLOC_LARGE(checksum.data, checksum.len);
320         if (!checksum.data)
321                 return rc;
322
323         if (sk_make_hmac(sht->sht_name, key, message_count, messages,
324                          iov_count, iovs, &checksum)) {
325                 CDEBUG(D_SEC, "Failed to create checksum to validate\n");
326                 goto cleanup;
327         }
328
329         if (memcmp(token->data, checksum.data, checksum.len)) {
330                 CERROR("checksum mismatch\n");
331                 rc = GSS_S_BAD_SIG;
332                 goto cleanup;
333         }
334
335         rc = GSS_S_COMPLETE;
336
337 cleanup:
338         OBD_FREE(checksum.data, checksum.len);
339         return rc;
340 }
341
342 /* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
343  * encrypted pages in the bulk descriptor are populated although we only need
344  * to decrypt up to the number of bytes actually specified from the sender
345  * (bd_nob) otherwise the calulated HMAC will be incorrect. */
346 static
347 __u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key,
348                           int msgcnt, rawobj_t *msgs, int iovcnt,
349                           lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
350 {
351         rawobj_t checksum = RAWOBJ_EMPTY;
352         struct crypto_hash *tfm;
353         struct hash_desc desc = {
354                 .tfm = NULL,
355                 .flags = 0,
356         };
357         struct scatterlist sg[1];
358         struct sg_table sgt;
359         int bytes;
360         int i;
361         int rc = GSS_S_FAILURE;
362
363         checksum.len = sht->sht_bytes;
364         if (token->len < checksum.len) {
365                 CDEBUG(D_SEC, "Token received too short, expected %d "
366                        "received %d\n", token->len, checksum.len);
367                 return GSS_S_DEFECTIVE_TOKEN;
368         }
369
370         OBD_ALLOC_LARGE(checksum.data, checksum.len);
371         if (!checksum.data)
372                 return rc;
373
374         tfm = crypto_alloc_hash(sht->sht_name, 0, 0);
375         if (IS_ERR(tfm))
376                 goto cleanup;
377
378         desc.tfm = tfm;
379
380         LASSERT(token->len >= crypto_hash_digestsize(tfm));
381
382         rc = crypto_hash_setkey(tfm, key->data, key->len);
383         if (rc)
384                 goto hash_cleanup;
385
386         rc = crypto_hash_init(&desc);
387         if (rc)
388                 goto hash_cleanup;
389
390         for (i = 0; i < msgcnt; i++) {
391                 if (msgs[i].len == 0)
392                         continue;
393
394                 rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
395                 if (rc != 0)
396                         goto hash_cleanup;
397
398                 rc = crypto_hash_update(&desc, sg, msgs[i].len);
399                 if (rc) {
400                         gss_teardown_sgtable(&sgt);
401                         goto hash_cleanup;
402                 }
403
404                 gss_teardown_sgtable(&sgt);
405         }
406
407         for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
408                 if (iovs[i].kiov_len == 0)
409                         continue;
410
411                 bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
412                 iov_bytes -= bytes;
413
414                 sg_init_table(sg, 1);
415                 sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
416                             iovs[i].kiov_offset);
417                 rc = crypto_hash_update(&desc, sg, bytes);
418                 if (rc)
419                         goto hash_cleanup;
420         }
421
422         crypto_hash_final(&desc, checksum.data);
423
424         if (memcmp(token->data, checksum.data, checksum.len)) {
425                 rc = GSS_S_BAD_SIG;
426                 goto hash_cleanup;
427         }
428
429         rc = GSS_S_COMPLETE;
430
431 hash_cleanup:
432         crypto_free_hash(tfm);
433
434 cleanup:
435         OBD_FREE_LARGE(checksum.data, checksum.len);
436
437         return rc;
438 }
439
440 static
441 __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
442                         int message_count,
443                         rawobj_t *messages,
444                         int iov_count,
445                         lnet_kiov_t *iovs,
446                         rawobj_t *token)
447 {
448         struct sk_ctx *skc = gss_context->internal_ctx_id;
449         return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_shared_key,
450                               message_count, messages, iov_count, iovs, token);
451 }
452
/* Wrap (encrypt + integrity-protect) a message for privacy mode.  The
 * output token is laid out as [ ciphertext | HMAC ], where the HMAC
 * covers the GSS header followed by the ciphertext.  On success
 * token->len is trimmed to the bytes actually produced. */
static
__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		    rawobj_t *message, int message_buffer_length,
		    rawobj_t *token)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
	rawobj_t msgbufs[2];
	rawobj_t cipher;
	rawobj_t checksum;
	unsigned int blocksize;

	LASSERT(skc->sc_session_kb.kb_tfm);
	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

	/* Pad the plain text out to the cipher block size in place. */
	if (gss_add_padding(message, message_buffer_length, blocksize))
		return GSS_S_FAILURE;

	/* Only encrypting the message data */
	cipher.data = token->data;
	cipher.len = token->len - sht->sht_bytes;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, message,
			      &cipher, 1))
		return GSS_S_FAILURE;

	/* Checksum covers the GSS header followed by the encrypted message */
	msgbufs[0].len = gss_header->len;
	msgbufs[0].data = gss_header->data;
	msgbufs[1].len = cipher.len;
	msgbufs[1].data = cipher.data;

	/* HMAC is appended directly after the ciphertext in the token. */
	LASSERT(cipher.len + sht->sht_bytes <= token->len);
	checksum.data = token->data + cipher.len;
	checksum.len = sht->sht_bytes;
	if (sk_make_hmac(sht->sht_name, &skc->sc_shared_key, 2, msgbufs, 0,
			 NULL, &checksum))
		return GSS_S_FAILURE;

	token->len = cipher.len + checksum.len;

	return GSS_S_COMPLETE;
}
495
/* Unwrap a privacy-mode token: verify the trailing HMAC (which covers
 * the GSS header followed by the ciphertext) and then decrypt the
 * payload into @message.  Token layout is [ ciphertext | HMAC ]. */
static
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		      rawobj_t *token, rawobj_t *message)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
	rawobj_t msgbufs[2];
	rawobj_t cipher;
	rawobj_t checksum;
	unsigned int blocksize;
	int rc;

	LASSERT(skc->sc_session_kb.kb_tfm);
	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

	/* Token must at least hold a full checksum. */
	if (token->len < sht->sht_bytes)
		return GSS_S_DEFECTIVE_TOKEN;

	cipher.data = token->data;
	cipher.len = token->len - sht->sht_bytes;
	checksum.data = token->data + cipher.len;
	checksum.len = sht->sht_bytes;

	/* Ciphertext must be a whole number of cipher blocks. */
	if (cipher.len % blocksize != 0)
		return GSS_S_DEFECTIVE_TOKEN;

	/* Checksum covers the GSS header followed by the encrypted message */
	msgbufs[0].len = gss_header->len;
	msgbufs[0].data = gss_header->data;
	msgbufs[1].len = cipher.len;
	msgbufs[1].data = cipher.data;
	rc = sk_verify_hmac(sht, &skc->sc_shared_key, 2, msgbufs, 0, NULL,
			    &checksum);
	if (rc)
		return rc;

	/* Verified; decrypt the payload into @message. */
	message->len = cipher.len;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, &cipher,
			      message, 0))
		return GSS_S_FAILURE;

	return GSS_S_COMPLETE;
}
539
540 static
541 __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
542                        struct ptlrpc_bulk_desc *desc)
543 {
544         struct sk_ctx *skc = gss_context->internal_ctx_id;
545         int blocksize;
546         int i;
547
548         LASSERT(skc->sc_session_kb.kb_tfm);
549         blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
550
551         for (i = 0; i < desc->bd_iov_count; i++) {
552                 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
553                         CERROR("offset %d not blocksize aligned\n",
554                                BD_GET_KIOV(desc, i).kiov_offset);
555                         return GSS_S_FAILURE;
556                 }
557
558                 BD_GET_ENC_KIOV(desc, i).kiov_offset =
559                         BD_GET_KIOV(desc, i).kiov_offset;
560                 BD_GET_ENC_KIOV(desc, i).kiov_len =
561                         sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
562         }
563
564         return GSS_S_COMPLETE;
565 }
566
/* Encrypt the plain-text pages of @desc into the pre-allocated
 * encrypted pages (BD_GET_ENC_KIOV), one scatterlist entry per
 * fragment.  Each fragment is rounded up to the cipher block size, so
 * the ciphertext may be longer than the plain text; with @adj_nob set,
 * desc->bd_nob is updated to the padded total actually produced.
 * NOTE(review): @cipher is not referenced in this function — confirm
 * it exists only for signature symmetry with sk_decrypt_bulk().
 *
 * Returns 0 on success or the negative crypto-layer error code. */
static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
			     struct ptlrpc_bulk_desc *desc,
			     rawobj_t *cipher,
			     int adj_nob)
{
	struct blkcipher_desc cdesc = {
		.tfm = tfm,
		.info = NULL,
		.flags = 0,
	};
	struct scatterlist ptxt;
	struct scatterlist ctxt;
	int blocksize;
	int i;
	int rc;
	int nob = 0;

	blocksize = crypto_blkcipher_blocksize(tfm);

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	for (i = 0; i < desc->bd_iov_count; i++) {
		/* Source fragment, padded out to the block size. */
		sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
			    sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
					  blocksize),
			    BD_GET_KIOV(desc, i).kiov_offset);
		if (adj_nob)
			nob += ptxt.length;

		/* Destination: the encryption page at the same
		 * offset/length as the (padded) source. */
		sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
			    ptxt.length, ptxt.offset);

		BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
		BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

		rc = crypto_blkcipher_encrypt(&cdesc, &ctxt, &ptxt,
					      ptxt.length);
		if (rc) {
			CERROR("failed to encrypt page: %d\n", rc);
			return rc;
		}
	}

	/* Report the padded byte count that will go on the wire. */
	if (adj_nob)
		desc->bd_nob = nob;

	return 0;
}
616
/* Decrypt the received encrypted pages of @desc back into the bulk's
 * plain-text pages.  Fragment lengths are validated against the number
 * of bytes actually transferred (bd_nob_transferred); with @adj_nob the
 * kiov lengths are clipped so ciphertext never exceeds the transfer and
 * plain text never exceeds bd_nob, and unused trailing iovs are zeroed.
 * NOTE(review): @cipher is not referenced in this function — confirm
 * it exists only for signature symmetry with sk_encrypt_bulk().
 *
 * Returns 0 on success or a GSS_S_* error status. */
static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
			     struct ptlrpc_bulk_desc *desc,
			     rawobj_t *cipher,
			     int adj_nob)
{
	struct blkcipher_desc cdesc = {
		.tfm = tfm,
		.info = NULL,
		.flags = 0,
	};
	struct scatterlist ptxt;
	struct scatterlist ctxt;
	int blocksize;
	int i;
	int rc;
	int pnob = 0;	/* plain-text bytes produced so far */
	int cnob = 0;	/* ciphertext bytes consumed so far */

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	blocksize = crypto_blkcipher_blocksize(tfm);
	if (desc->bd_nob_transferred % blocksize != 0) {
		CERROR("Transfer not a multiple of block size: %d\n",
		       desc->bd_nob_transferred);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
	     i++) {
		lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
		lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

		if (ciov->kiov_offset % blocksize != 0 ||
		    ciov->kiov_len % blocksize != 0) {
			CERROR("Invalid bulk descriptor vector\n");
			return GSS_S_DEFECTIVE_TOKEN;
		}

		/* Must adjust bytes here because we know the actual sizes after
		 * decryption.  Similar to what gss_cli_ctx_unwrap_bulk does for
		 * integrity only mode */
		if (adj_nob) {
			/* cipher text must not exceed transferred size */
			if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
				ciov->kiov_len =
					desc->bd_nob_transferred - cnob;

			piov->kiov_len = ciov->kiov_len;

			/* plain text must not exceed bulk's size */
			if (ciov->kiov_len + pnob > desc->bd_nob)
				piov->kiov_len = desc->bd_nob - pnob;
		} else {
			/* Taken from krb5_decrypt since it was not verified
			 * whether or not LNET guarantees these */
			if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
			    piov->kiov_len > ciov->kiov_len) {
				CERROR("Invalid decrypted length\n");
				return GSS_S_FAILURE;
			}
		}

		if (ciov->kiov_len == 0)
			continue;

		sg_init_table(&ctxt, 1);
		sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
			    ciov->kiov_offset);
		ptxt = ctxt;

		/* In the event the plain text size is not a multiple
		 * of blocksize we decrypt in place and copy the result
		 * after the decryption */
		if (piov->kiov_len % blocksize == 0)
			sg_assign_page(&ptxt, piov->kiov_page);

		rc = crypto_blkcipher_decrypt(&cdesc, &ptxt, &ctxt,
					      ctxt.length);
		if (rc) {
			CERROR("Decryption failed for page: %d\n", rc);
			return GSS_S_FAILURE;
		}

		/* Decrypted in place above; copy just the payload bytes
		 * into the plain-text page. */
		if (piov->kiov_len % blocksize != 0) {
			memcpy(page_address(piov->kiov_page) +
			       piov->kiov_offset,
			       page_address(ciov->kiov_page) +
			       ciov->kiov_offset,
			       piov->kiov_len);
		}

		cnob += ciov->kiov_len;
		pnob += piov->kiov_len;
	}

	/* if needed, clear up the rest unused iovs */
	if (adj_nob)
		while (i < desc->bd_iov_count)
			BD_GET_KIOV(desc, i++).kiov_len = 0;

	if (unlikely(cnob != desc->bd_nob_transferred)) {
		CERROR("%d cipher text transferred but only %d decrypted\n",
		       desc->bd_nob_transferred, cnob);
		return GSS_S_FAILURE;
	}

	if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
		CERROR("%d plain text expected but only %d received\n",
		       desc->bd_nob, pnob);
		return GSS_S_FAILURE;
	}

	return 0;
}
732
733 static
734 __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
735                          struct ptlrpc_bulk_desc *desc, rawobj_t *token,
736                          int adj_nob)
737 {
738         struct sk_ctx *skc = gss_context->internal_ctx_id;
739         struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
740         rawobj_t cipher = RAWOBJ_EMPTY;
741         rawobj_t checksum = RAWOBJ_EMPTY;
742
743         cipher.data = token->data;
744         cipher.len = token->len - sht->sht_bytes;
745         memset(token->data, 0, token->len);
746
747         if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob))
748                 return GSS_S_FAILURE;
749
750         checksum.data = token->data + cipher.len;
751         checksum.len = sht->sht_bytes;
752
753         if (sk_make_hmac(sht->sht_name, &skc->sc_shared_key, 1, &cipher,
754                          desc->bd_iov_count, GET_ENC_KIOV(desc), &checksum))
755                 return GSS_S_FAILURE;
756
757         return GSS_S_COMPLETE;
758 }
759
760 static
761 __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
762                            struct ptlrpc_bulk_desc *desc,
763                            rawobj_t *token, int adj_nob)
764 {
765         struct sk_ctx *skc = gss_context->internal_ctx_id;
766         struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
767         rawobj_t cipher = RAWOBJ_EMPTY;
768         rawobj_t checksum = RAWOBJ_EMPTY;
769         int rc;
770
771         cipher.data = token->data;
772         cipher.len = token->len - sht->sht_bytes;
773         checksum.data = token->data + cipher.len;
774         checksum.len = sht->sht_bytes;
775
776         rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac],
777                                  &skc->sc_shared_key, 1, &cipher,
778                                  desc->bd_iov_count, GET_ENC_KIOV(desc),
779                                  desc->bd_nob, &checksum);
780         if (rc)
781                 return rc;
782
783         rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob);
784         if (rc)
785                 return rc;
786
787         return GSS_S_COMPLETE;
788 }
789
/* Destroy a shared key context: release key material, then the context
 * allocation itself. */
static
void gss_delete_sec_context_sk(void *internal_context)
{
	struct sk_ctx *skc = internal_context;

	delete_sk_context(skc);
	OBD_FREE_PTR(skc);
}
797
/* Write the mechanism's short display name ("sk") into @buf; returns
 * the number of characters snprintf would have written. */
int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
	return snprintf(buf, bufsize, "sk");
}
802
/* GSS mechanism operations table for the shared key (sk) mechanism. */
static struct gss_api_ops gss_sk_ops = {
	.gss_import_sec_context     = gss_import_sec_context_sk,
	.gss_copy_reverse_context   = gss_copy_reverse_context_sk,
	.gss_inquire_context        = gss_inquire_context_sk,
	.gss_get_mic                = gss_get_mic_sk,
	.gss_verify_mic             = gss_verify_mic_sk,
	.gss_wrap                   = gss_wrap_sk,
	.gss_unwrap                 = gss_unwrap_sk,
	.gss_prep_bulk              = gss_prep_bulk_sk,
	.gss_wrap_bulk              = gss_wrap_bulk_sk,
	.gss_unwrap_bulk            = gss_unwrap_bulk_sk,
	.gss_delete_sec_context     = gss_delete_sec_context_sk,
	.gss_display                = gss_display_sk,
};
817
/* sptlrpc subflavors offered by the sk mechanism: null, auth,
 * integrity, and privacy service levels. */
static struct subflavor_desc gss_sk_sfs[] = {
	{
		.sf_subflavor   = SPTLRPC_SUBFLVR_SKN,
		.sf_qop         = 0,
		.sf_service     = SPTLRPC_SVC_NULL,
		.sf_name        = "skn"
	},
	{
		.sf_subflavor   = SPTLRPC_SUBFLVR_SKA,
		.sf_qop         = 0,
		.sf_service     = SPTLRPC_SVC_AUTH,
		.sf_name        = "ska"
	},
	{
		.sf_subflavor   = SPTLRPC_SUBFLVR_SKI,
		.sf_qop         = 0,
		.sf_service     = SPTLRPC_SVC_INTG,
		.sf_name        = "ski"
	},
	{
		.sf_subflavor   = SPTLRPC_SUBFLVR_SKPI,
		.sf_qop         = 0,
		.sf_service     = SPTLRPC_SVC_PRIV,
		.sf_name        = "skpi"
	},
};
844
/*
 * Mechanism descriptor registered with the GSS framework.
 * currently we leave module owner NULL
 */
static struct gss_api_mech gss_sk_mech = {
	.gm_owner       = NULL, /*THIS_MODULE, */
	.gm_name        = "sk",
	/* DER-encoded mechanism OID blob; 12 bytes long. */
	.gm_oid         = (rawobj_t) {
		12,
		"\053\006\001\004\001\311\146\215\126\001\000\001",
	},
	.gm_ops         = &gss_sk_ops,
	.gm_sf_num      = 4,	/* entries in gss_sk_sfs[] */
	.gm_sfs         = gss_sk_sfs,
};
859
860 int __init init_sk_module(void)
861 {
862         int status;
863
864         status = lgss_mech_register(&gss_sk_mech);
865         if (status)
866                 CERROR("Failed to register sk gss mechanism!\n");
867
868         return status;
869 }
870
871 void cleanup_sk_module(void)
872 {
873         lgss_mech_unregister(&gss_sk_mech);
874 }