LU-6210 gss: Change positional struct initializers to C99
[fs/lustre-release.git] lustre/ptlrpc/gss/gss_sk_mech.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (C) 2013, 2015, Trustees of Indiana University
 *
 * Copyright (c) 2014, 2016, Intel Corporation.
 *
 * Author: Jeremy Filizetti <jfilizet@iu.edu>
 * Author: Andrew Korty <ajk@iu.edu>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#include <crypto/ctr.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_user.h>

#include "gss_err.h"
#include "gss_crypto.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"

#define SK_INTERFACE_VERSION 1
#define SK_MSG_VERSION 1
#define SK_MIN_SIZE 8
#define SK_IV_SIZE 16

/* Starting number for reverse contexts.  It is critical to security
 * that reverse contexts use a different range of numbers than regular
 * contexts because they are using the same key.  Therefore the IV/nonce
 * combination must be unique for them.  To accomplish this reverse contexts
 * use the negative range of a 64-bit number and regular contexts use the
 * positive range.  If the same IV/nonce combination were reused it would leak
 * information about the plaintext. */
#define SK_IV_REV_START (1ULL << 63)

struct sk_ctx {
        __u16                   sc_hmac;
        __u16                   sc_crypt;
        __u32                   sc_expire;
        __u32                   sc_host_random;
        __u32                   sc_peer_random;
        atomic64_t              sc_iv;
        rawobj_t                sc_hmac_key;
        struct gss_keyblock     sc_session_kb;
};

struct sk_hdr {
        __u64                   skh_version;
        __u64                   skh_iv;
} __attribute__((packed));

/* The format of SK wire data is similar to that of RFC3686 ESP Payload
 * (section 3) except instead of just an IV there is a struct sk_hdr.
 * ---------------------------------------------------------------------
 * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
 * --------------------------------------------------------------------- */
struct sk_wire {
        rawobj_t                skw_header;
        rawobj_t                skw_cipher;
        rawobj_t                skw_hmac;
};

static struct sk_crypt_type sk_crypt_types[] = {
        [SK_CRYPT_AES256_CTR] = {
                .sct_name = "ctr(aes)",
                .sct_bytes = 32,
        },
};

static struct sk_hmac_type sk_hmac_types[] = {
        [SK_HMAC_SHA256] = {
                .sht_name = "hmac(sha256)",
                .sht_bytes = 32,
        },
        [SK_HMAC_SHA512] = {
                .sht_name = "hmac(sha512)",
                .sht_bytes = 64,
        },
};

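/* Round len up to the next multiple of blocksize; the mask trick below
 * assumes blocksize is a power of two. */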
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
        return (len + blocksize - 1) & (~(blocksize - 1));
}

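/* Stamp the wire header with the message version and the next IV counter
 * value for this context, both stored big-endian on the wire. */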
static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
{
        __u64 tmp_iv;

        skh->skh_version = cpu_to_be64(SK_MSG_VERSION);

        /* Always use inc_return so we never use the initial values, which
         * are reserved for detecting IV reuse */
        tmp_iv = atomic64_inc_return(&skc->sc_iv);
        skh->skh_iv = cpu_to_be64(tmp_iv);
        if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
                CERROR("Counter looped, connection must be reset to avoid "
                       "leaking plaintext information\n");
                return GSS_S_FAILURE;
        }

        return GSS_S_COMPLETE;
}

static int sk_verify_header(struct sk_hdr *skh)
{
        if (be64_to_cpu(skh->skh_version) != SK_MSG_VERSION)
                return GSS_S_DEFECTIVE_TOKEN;

        return GSS_S_COMPLETE;
}

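/* Build the 16-byte RFC 3686 counter block: a 4-byte nonce, the 8-byte
 * per-message IV, and a 4-byte block counter that always starts at 1. */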
void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
{
        __u32 ctr = cpu_to_be32(1);

        memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
        iv += CTR_RFC3686_NONCE_SIZE;
        memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
        iv += CTR_RFC3686_IV_SIZE;
        memcpy(iv, &ctr, sizeof(ctr));
}

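/* Allocate the block cipher handle for the session key.  Only needed when a
 * session key was provided, i.e. in privacy mode. */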
static int sk_init_keys(struct sk_ctx *skc)
{
        return gss_keyblock_init(&skc->sc_session_kb,
                                 sk_crypt_types[skc->sc_crypt].sct_name, 0);
}

static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
        char *ptr = inbuf->data;
        char *end = inbuf->data + inbuf->len;
        __u32 tmp;

        /* see sk_serialize_kctx() for format from userspace side */
        /*  1. Version */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read shared key interface version\n");
                return -1;
        }
        if (tmp != SK_INTERFACE_VERSION) {
                CERROR("Invalid shared key interface version: %d\n", tmp);
                return -1;
        }

        /* 2. HMAC type */
        if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
                CERROR("Failed to read HMAC algorithm type\n");
                return -1;
        }
        if (skc->sc_hmac <= SK_HMAC_EMPTY || skc->sc_hmac >= SK_HMAC_MAX) {
                CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
                return -1;
        }

        /* 3. crypt type */
        if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
                CERROR("Failed to read crypt algorithm type\n");
                return -1;
        }
        if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) {
                CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
                return -1;
        }

        /* 4. expiration time */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read context expiration time\n");
                return -1;
        }
        skc->sc_expire = tmp + cfs_time_current_sec();

        /* 5. host random is used as nonce for encryption */
        if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
                          sizeof(skc->sc_host_random))) {
                CERROR("Failed to read host random\n");
                return -1;
        }

        /* 6. peer random is used as nonce for decryption */
        if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
                          sizeof(skc->sc_peer_random))) {
                CERROR("Failed to read peer random\n");
                return -1;
        }

        /* 7. HMAC key */
        if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
                CERROR("Failed to read HMAC key\n");
                return -1;
        }
        if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
                CERROR("HMAC key must be larger than %d bytes\n",
                       SK_MIN_SIZE);
                return -1;
        }

        /* 8. Session key, can be empty if not using privacy mode */
        if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
                CERROR("Failed to read session key\n");
                return -1;
        }

        return 0;
}

static void sk_delete_context(struct sk_ctx *skc)
{
        if (!skc)
                return;

        rawobj_free(&skc->sc_hmac_key);
        gss_keyblock_free(&skc->sc_session_kb);
        OBD_FREE_PTR(skc);
}

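/* Import a shared key context from the buffer serialized by userspace (see
 * sk_fill_context() for the layout).  Session keys are only expanded when a
 * key is present, i.e. for privacy mode. */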
static
__u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
{
        struct sk_ctx *skc;
        bool privacy = false;

        if (inbuf == NULL || inbuf->data == NULL)
                return GSS_S_FAILURE;

        OBD_ALLOC_PTR(skc);
        if (!skc)
                return GSS_S_FAILURE;

        atomic64_set(&skc->sc_iv, 0);

        if (sk_fill_context(inbuf, skc))
                goto out_err;

        /* Only privacy mode needs to initialize keys */
        if (skc->sc_session_kb.kb_key.len > 0) {
                privacy = true;
                if (sk_init_keys(skc))
                        goto out_err;
        }

        gss_context->internal_ctx_id = skc;
        CDEBUG(D_SEC, "successfully imported sk%s context\n",
               privacy ? "pi" : "i");

        return GSS_S_COMPLETE;

out_err:
        sk_delete_context(skc);
        return GSS_S_FAILURE;
}

static
__u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
                                  struct gss_ctx *gss_context_new)
{
        struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
        struct sk_ctx *skc_new;

        OBD_ALLOC_PTR(skc_new);
        if (!skc_new)
                return GSS_S_FAILURE;

        skc_new->sc_hmac = skc_old->sc_hmac;
        skc_new->sc_crypt = skc_old->sc_crypt;
        skc_new->sc_expire = skc_old->sc_expire;
        skc_new->sc_host_random = skc_old->sc_host_random;
        skc_new->sc_peer_random = skc_old->sc_peer_random;

        atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);

        if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))
                goto out_err;
        if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
                goto out_err;

        /* Only privacy mode needs to initialize keys */
        if (skc_new->sc_session_kb.kb_key.len > 0)
                if (sk_init_keys(skc_new))
                        goto out_err;

        gss_context_new->internal_ctx_id = skc_new;
        CDEBUG(D_SEC, "successfully copied reverse sk context\n");

        return GSS_S_COMPLETE;

out_err:
        sk_delete_context(skc_new);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
                             unsigned long *endtime)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        *endtime = skc->sc_expire;
        return GSS_S_COMPLETE;
}

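/* HMAC the given buffers and kiovs with the named hash algorithm, writing the
 * digest into token (which must be at least the digest size). */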
static
__u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs,
                   int iov_count, lnet_kiov_t *iovs, rawobj_t *token)
{
        struct crypto_hash *tfm;
        int rc;

        tfm = crypto_alloc_hash(alg_name, 0, 0);
        if (IS_ERR(tfm))
                return GSS_S_FAILURE;

        rc = GSS_S_FAILURE;
        LASSERT(token->len >= crypto_hash_digestsize(tfm));
        if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
                            token))
                rc = GSS_S_COMPLETE;

        crypto_free_hash(tfm);
        return rc;
}

static
__u32 gss_get_mic_sk(struct gss_ctx *gss_context,
                     int message_count,
                     rawobj_t *messages,
                     int iov_count,
                     lnet_kiov_t *iovs,
                     rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name,
                            &skc->sc_hmac_key, message_count, messages,
                            iov_count, iovs, token);
}

static
__u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count,
                     rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
                     rawobj_t *token)
{
        rawobj_t checksum = RAWOBJ_EMPTY;
        __u32 rc = GSS_S_FAILURE;

        checksum.len = sht->sht_bytes;
        if (token->len < checksum.len) {
                CDEBUG(D_SEC, "Token received too short, expected %d "
                       "received %d\n", checksum.len, token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        OBD_ALLOC_LARGE(checksum.data, checksum.len);
        if (!checksum.data)
                return rc;

        if (sk_make_hmac(sht->sht_name, key, message_count, messages,
                         iov_count, iovs, &checksum)) {
                CDEBUG(D_SEC, "Failed to create checksum to validate\n");
                goto cleanup;
        }

        if (memcmp(token->data, checksum.data, checksum.len)) {
                CERROR("checksum mismatch\n");
                rc = GSS_S_BAD_SIG;
                goto cleanup;
        }

        rc = GSS_S_COMPLETE;

cleanup:
        OBD_FREE_LARGE(checksum.data, checksum.len);
        return rc;
}

/* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
 * encrypted pages in the bulk descriptor are populated although we only need
 * to decrypt up to the number of bytes actually specified from the sender
 * (bd_nob) otherwise the calculated HMAC will be incorrect. */
static
__u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key,
                          int msgcnt, rawobj_t *msgs, int iovcnt,
                          lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
{
        rawobj_t checksum = RAWOBJ_EMPTY;
        struct crypto_hash *tfm;
        struct hash_desc desc = {
                .tfm = NULL,
                .flags = 0,
        };
        struct scatterlist sg[1];
        struct sg_table sgt;
        int bytes;
        int i;
        int rc = GSS_S_FAILURE;

        checksum.len = sht->sht_bytes;
        if (token->len < checksum.len) {
                CDEBUG(D_SEC, "Token received too short, expected %d "
                       "received %d\n", checksum.len, token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        OBD_ALLOC_LARGE(checksum.data, checksum.len);
        if (!checksum.data)
                return rc;

        tfm = crypto_alloc_hash(sht->sht_name, 0, 0);
        if (IS_ERR(tfm))
                goto cleanup;

        desc.tfm = tfm;

        LASSERT(token->len >= crypto_hash_digestsize(tfm));

        rc = crypto_hash_setkey(tfm, key->data, key->len);
        if (rc)
                goto hash_cleanup;

        rc = crypto_hash_init(&desc);
        if (rc)
                goto hash_cleanup;

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;

                rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
                if (rc != 0)
                        goto hash_cleanup;

                rc = crypto_hash_update(&desc, sg, msgs[i].len);
                if (rc) {
                        gss_teardown_sgtable(&sgt);
                        goto hash_cleanup;
                }

                gss_teardown_sgtable(&sgt);
        }

        for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
                iov_bytes -= bytes;

                sg_init_table(sg, 1);
                sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
                            iovs[i].kiov_offset);
                rc = crypto_hash_update(&desc, sg, bytes);
                if (rc)
                        goto hash_cleanup;
        }

        crypto_hash_final(&desc, checksum.data);

        if (memcmp(token->data, checksum.data, checksum.len)) {
                rc = GSS_S_BAD_SIG;
                goto hash_cleanup;
        }

        rc = GSS_S_COMPLETE;

hash_cleanup:
        crypto_free_hash(tfm);

cleanup:
        OBD_FREE_LARGE(checksum.data, checksum.len);

        return rc;
}

static
__u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
                        int message_count,
                        rawobj_t *messages,
                        int iov_count,
                        lnet_kiov_t *iovs,
                        rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_hmac_key,
                              message_count, messages, iov_count, iovs, token);
}

static
__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                  rawobj_t *message, int message_buffer_length,
                  rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        struct sk_wire skw;
        struct sk_hdr skh;
        rawobj_t msgbufs[3];
        __u8 local_iv[SK_IV_SIZE];
        unsigned int blocksize;

        LASSERT(skc->sc_session_kb.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
        if (gss_add_padding(message, message_buffer_length, blocksize))
                return GSS_S_FAILURE;

        memset(token->data, 0, token->len);

        if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
                return GSS_S_FAILURE;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(skh);
        memcpy(skw.skw_header.data, &skh, sizeof(skh));

        sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
                              &skw.skw_cipher, 1))
                return GSS_S_FAILURE;

        /* HMAC covers the SK header, GSS header, and ciphertext */
        msgbufs[0] = skw.skw_header;
        msgbufs[1] = *gss_header;
        msgbufs[2] = skw.skw_cipher;

        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht->sht_bytes;
        if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 3, msgbufs, 0,
                         NULL, &skw.skw_hmac))
                return GSS_S_FAILURE;

        token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;

        return GSS_S_COMPLETE;
}

static
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                    rawobj_t *token, rawobj_t *message)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        struct sk_wire skw;
        struct sk_hdr *skh;
        rawobj_t msgbufs[3];
        __u8 local_iv[SK_IV_SIZE];
        unsigned int blocksize;
        int rc;

        LASSERT(skc->sc_session_kb.kb_tfm);

        if (token->len < sizeof(*skh) + sht->sht_bytes)
                return GSS_S_DEFECTIVE_TOKEN;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(struct sk_hdr);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht->sht_bytes;

        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
        if (skw.skw_cipher.len % blocksize != 0)
                return GSS_S_DEFECTIVE_TOKEN;

        skh = (struct sk_hdr *)skw.skw_header.data;
        rc = sk_verify_header(skh);
        if (rc != GSS_S_COMPLETE)
                return rc;

        /* HMAC covers the SK header, GSS header, and ciphertext */
        msgbufs[0] = skw.skw_header;
        msgbufs[1] = *gss_header;
        msgbufs[2] = skw.skw_cipher;
        rc = sk_verify_hmac(sht, &skc->sc_hmac_key, 3, msgbufs, 0, NULL,
                            &skw.skw_hmac);
        if (rc)
                return rc;

        sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
        message->len = skw.skw_cipher.len;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
                              1, &skw.skw_cipher, message, 0))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}

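/* Size the encrypted kiovs so that every plaintext segment can be padded up
 * to the cipher block size before bulk encryption. */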
static
__u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        int blocksize;
        int i;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("offset %d not blocksize aligned\n",
                               BD_GET_KIOV(desc, i).kiov_offset);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
        }

        return GSS_S_COMPLETE;
}

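/* Encrypt each bulk page into its encrypted kiov.  Plaintext lengths are
 * rounded up to the cipher block size, so with adj_nob set bd_nob is updated
 * to the padded total that is actually sent. */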
static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                             struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = iv,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int nob = 0;

        blocksize = crypto_blkcipher_blocksize(tfm);

        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
                            sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
                                          blocksize),
                            BD_GET_KIOV(desc, i).kiov_offset);
                nob += ptxt.length;

                sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            ptxt.length, ptxt.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

                rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
                                                 ptxt.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}

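/* Decrypt the encrypted kiovs back into the plain kiovs.  With adj_nob set
 * the kiov lengths are clipped to bd_nob_transferred and bd_nob since the
 * ciphertext may carry block-size padding; without it the caller-provided
 * lengths are validated instead. */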
static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                             struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = iv,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int pnob = 0;
        int cnob = 0;

        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        blocksize = crypto_blkcipher_blocksize(tfm);
        if (desc->bd_nob_transferred % blocksize != 0) {
                CERROR("Transfer not a multiple of block size: %d\n",
                       desc->bd_nob_transferred);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
             i++) {
                lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
                lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

                if (ciov->kiov_offset % blocksize != 0 ||
                    ciov->kiov_len % blocksize != 0) {
                        CERROR("Invalid bulk descriptor vector\n");
                        return GSS_S_DEFECTIVE_TOKEN;
                }

                /* Must adjust bytes here because we know the actual sizes after
                 * decryption.  Similar to what gss_cli_ctx_unwrap_bulk does for
                 * integrity only mode */
                if (adj_nob) {
                        /* cipher text must not exceed transferred size */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
                                ciov->kiov_len =
                                        desc->bd_nob_transferred - cnob;

                        piov->kiov_len = ciov->kiov_len;

                        /* plain text must not exceed bulk's size */
                        if (ciov->kiov_len + pnob > desc->bd_nob)
                                piov->kiov_len = desc->bd_nob - pnob;
                } else {
                        /* Taken from krb5_decrypt since it was not verified
                         * whether or not LNET guarantees these */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
                            piov->kiov_len > ciov->kiov_len) {
                                CERROR("Invalid decrypted length\n");
                                return GSS_S_FAILURE;
                        }
                }

                if (ciov->kiov_len == 0)
                        continue;

                sg_init_table(&ctxt, 1);
                sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
                            ciov->kiov_offset);
                ptxt = ctxt;

                /* In the event the plain text size is not a multiple
                 * of blocksize we decrypt in place and copy the result
                 * after the decryption */
                if (piov->kiov_len % blocksize == 0)
                        sg_assign_page(&ptxt, piov->kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
                                                 ctxt.length);
                if (rc) {
                        CERROR("Decryption failed for page: %d\n", rc);
                        return GSS_S_FAILURE;
                }

                if (piov->kiov_len % blocksize != 0) {
                        memcpy(page_address(piov->kiov_page) +
                               piov->kiov_offset,
                               page_address(ciov->kiov_page) +
                               ciov->kiov_offset,
                               piov->kiov_len);
                }

                cnob += ciov->kiov_len;
                pnob += piov->kiov_len;
        }

        /* if needed, clear out the remaining unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        if (unlikely(cnob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, cnob);
                return GSS_S_FAILURE;
        }

        if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pnob);
                return GSS_S_FAILURE;
        }

        return 0;
}

static
__u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc, rawobj_t *token,
                       int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        struct sk_wire skw;
        struct sk_hdr skh;
        __u8 local_iv[SK_IV_SIZE];

        LASSERT(skc->sc_session_kb.kb_tfm);

        memset(token->data, 0, token->len);
        if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
                return GSS_S_FAILURE;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(skh);
        memcpy(skw.skw_header.data, &skh, sizeof(skh));

        sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
        if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
                            desc, &skw.skw_cipher, adj_nob))
                return GSS_S_FAILURE;

        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht->sht_bytes;
        if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 1, &skw.skw_cipher,
                         desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}

static
__u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
                         struct ptlrpc_bulk_desc *desc,
                         rawobj_t *token, int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        struct sk_wire skw;
        struct sk_hdr *skh;
        __u8 local_iv[SK_IV_SIZE];
        int rc;

        LASSERT(skc->sc_session_kb.kb_tfm);

        if (token->len < sizeof(*skh) + sht->sht_bytes)
                return GSS_S_DEFECTIVE_TOKEN;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(struct sk_hdr);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht->sht_bytes;

        skh = (struct sk_hdr *)skw.skw_header.data;
        rc = sk_verify_header(skh);
        if (rc != GSS_S_COMPLETE)
                return rc;

        rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac],
                                 &skc->sc_hmac_key, 1, &skw.skw_cipher,
                                 desc->bd_iov_count, GET_ENC_KIOV(desc),
                                 desc->bd_nob, &skw.skw_hmac);
        if (rc)
                return rc;

        sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
        rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
                             desc, &skw.skw_cipher, adj_nob);
        if (rc)
                return rc;

        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_sk(void *internal_context)
{
        struct sk_ctx *sk_context = internal_context;

        sk_delete_context(sk_context);
}

int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
        return snprintf(buf, bufsize, "sk");
}

static struct gss_api_ops gss_sk_ops = {
        .gss_import_sec_context     = gss_import_sec_context_sk,
        .gss_copy_reverse_context   = gss_copy_reverse_context_sk,
        .gss_inquire_context        = gss_inquire_context_sk,
        .gss_get_mic                = gss_get_mic_sk,
        .gss_verify_mic             = gss_verify_mic_sk,
        .gss_wrap                   = gss_wrap_sk,
        .gss_unwrap                 = gss_unwrap_sk,
        .gss_prep_bulk              = gss_prep_bulk_sk,
        .gss_wrap_bulk              = gss_wrap_bulk_sk,
        .gss_unwrap_bulk            = gss_unwrap_bulk_sk,
        .gss_delete_sec_context     = gss_delete_sec_context_sk,
        .gss_display                = gss_display_sk,
};

static struct subflavor_desc gss_sk_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKN,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "skn"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKA,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "ska"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKI,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "ski"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKPI,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "skpi"
        },
};

static struct gss_api_mech gss_sk_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "sk",
        .gm_oid         = (rawobj_t) {
                .len = 12,
                .data = "\053\006\001\004\001\311\146\215\126\001\000\001",
        },
        .gm_ops         = &gss_sk_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_sk_sfs,
};

int __init init_sk_module(void)
{
        int status;

        status = lgss_mech_register(&gss_sk_mech);
        if (status)
                CERROR("Failed to register sk gss mechanism!\n");

        return status;
}

void cleanup_sk_module(void)
{
        lgss_mech_unregister(&gss_sk_mech);
}