lustre/ptlrpc/gss/gss_sk_mech.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (C) 2013, 2015, Trustees of Indiana University
24  *
25  * Copyright (c) 2014, 2016, Intel Corporation.
26  *
27  * Author: Jeremy Filizetti <jfilizet@iu.edu>
28  * Author: Andrew Korty <ajk@iu.edu>
29  */
30
31 #define DEBUG_SUBSYSTEM S_SEC
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/crypto.h>
36 #include <linux/mutex.h>
37 #include <crypto/ctr.h>
38
39 #include <obd.h>
40 #include <obd_class.h>
41 #include <obd_support.h>
42
43 #include "gss_err.h"
44 #include "gss_crypto.h"
45 #include "gss_internal.h"
46 #include "gss_api.h"
47 #include "gss_asn1.h"
48
49 #define SK_INTERFACE_VERSION 1
50 #define SK_MSG_VERSION 1
51 #define SK_MIN_SIZE 8
52 #define SK_IV_SIZE 16
53
54 /* Starting number for reverse contexts.  It is critical to security
55  * that reverse contexts use a different range of numbers than regular
56  * contexts because they are using the same key.  Therefore the IV/nonce
57  * combination must be unique for them.  To accomplish this, reverse contexts
58  * use the negative range of a 64-bit number and regular contexts use the
59  * positive range.  If the same IV/nonce combination were reused it would
60  * leak information about the plaintext. */
61 #define SK_IV_REV_START (1ULL << 63)
62
63 struct sk_ctx {
64         enum cfs_crypto_crypt_alg sc_crypt;
65         enum cfs_crypto_hash_alg  sc_hmac;
66         __u32                     sc_expire;
67         __u32                     sc_host_random;
68         __u32                     sc_peer_random;
69         atomic64_t                sc_iv;
70         rawobj_t                  sc_hmac_key;
71         struct gss_keyblock       sc_session_kb;
72 };
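
/*
 * Illustrative sketch (not part of the original file): how sc_iv is seeded
 * for the two directions, matching the disjoint ranges described above.
 * Forward contexts are seeded with 0 in gss_import_sec_context_sk() and
 * reverse contexts with SK_IV_REV_START in gss_copy_reverse_context_sk();
 * sk_example_seed_iv() is a hypothetical helper, not used by this file.
 */
static inline void sk_example_seed_iv(struct sk_ctx *skc, bool reverse)
{
        atomic64_set(&skc->sc_iv, reverse ? SK_IV_REV_START : 0);
}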
73
74 struct sk_hdr {
75         __u64                   skh_version;
76         __u64                   skh_iv;
77 } __attribute__((packed));
78
79 /* The format of SK wire data is similar to that of RFC3686 ESP Payload
80  * (section 3) except instead of just an IV there is a struct sk_hdr.
81  * ---------------------------------------------------------------------
82  * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
83  * --------------------------------------------------------------------- */
84 struct sk_wire {
85         rawobj_t                skw_header;
86         rawobj_t                skw_cipher;
87         rawobj_t                skw_hmac;
88 };
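
/* Worked size example (illustration only): assuming HMAC-SHA256 (32-byte
 * digest), a 1040-byte token splits into a 16-byte struct sk_hdr, 992 bytes
 * of ciphertext and a 32-byte HMAC; see gss_unwrap_sk() below for the code
 * that performs this split. */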
89
90 static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
91 {
92         return (len + blocksize - 1) & (~(blocksize - 1));
93 }
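
/* Worked example (illustration only): with a 16-byte cipher block,
 * sk_block_mask(1, 16) == 16, sk_block_mask(16, 16) == 16 and
 * sk_block_mask(17, 16) == 32.  The mask form assumes blocksize is a
 * power of two, which holds for the block ciphers used here. */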
94
95 static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
96 {
97         __u64 tmp_iv;
98         skh->skh_version = cpu_to_be64(SK_MSG_VERSION);
99
100         /* Always use inc_return so the initial values (0 and SK_IV_REV_START)
101          * are never used as IVs; they only serve to detect counter wrap below */
102         tmp_iv = atomic64_inc_return(&skc->sc_iv);
103         skh->skh_iv = cpu_to_be64(tmp_iv);
104         if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
105                 CERROR("Counter looped, connection must be reset to avoid "
106                        "leaking plaintext information\n");
107                 return GSS_S_FAILURE;
108         }
109
110         return GSS_S_COMPLETE;
111 }
112
113 static int sk_verify_header(struct sk_hdr *skh)
114 {
115         if (be64_to_cpu(skh->skh_version) != SK_MSG_VERSION)
116                 return GSS_S_DEFECTIVE_TOKEN;
117
118         return GSS_S_COMPLETE;
119 }
120
121 void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
122 {
123         __u32 ctr = cpu_to_be32(1);
124
125         memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
126         iv += CTR_RFC3686_NONCE_SIZE;
127         memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
128         iv += CTR_RFC3686_IV_SIZE;
129         memcpy(iv, &ctr, sizeof(ctr));
130 }
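
/* Layout of the 16-byte counter block built above (sketch, following the
 * RFC 3686 sizes from <crypto/ctr.h>):
 *
 *   bytes  0..3    nonce           (CTR_RFC3686_NONCE_SIZE == 4)
 *   bytes  4..11   per-message IV  (CTR_RFC3686_IV_SIZE == 8, skh_iv here)
 *   bytes 12..15   block counter   (big-endian, initialized to 1)
 *
 * which is why SK_IV_SIZE is 16. */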
131
132 static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
133 {
134         char *ptr = inbuf->data;
135         char *end = inbuf->data + inbuf->len;
136         char sk_hmac[CRYPTO_MAX_ALG_NAME];
137         char sk_crypt[CRYPTO_MAX_ALG_NAME];
138         u32 tmp;
139
140         /* see sk_serialize_kctx() for format from userspace side */
141         /*  1. Version */
142         if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
143                 CERROR("Failed to read shared key interface version\n");
144                 return -1;
145         }
146         if (tmp != SK_INTERFACE_VERSION) {
147                 CERROR("Invalid shared key interface version: %d\n", tmp);
148                 return -1;
149         }
150
151         /* 2. HMAC type */
152         if (gss_get_bytes(&ptr, end, &sk_hmac, sizeof(sk_hmac))) {
153                 CERROR("Failed to read HMAC algorithm type\n");
154                 return -1;
155         }
156
157         skc->sc_hmac = cfs_crypto_hash_alg(sk_hmac);
158         if (skc->sc_hmac != CFS_HASH_ALG_NULL &&
159             skc->sc_hmac != CFS_HASH_ALG_SHA256 &&
160             skc->sc_hmac != CFS_HASH_ALG_SHA512) {
161                 CERROR("Invalid hmac type: %s\n", sk_hmac);
162                 return -1;
163         }
164
165         /* 3. crypt type */
166         if (gss_get_bytes(&ptr, end, &sk_crypt, sizeof(sk_crypt))) {
167                 CERROR("Failed to read crypt algorithm type\n");
168                 return -1;
169         }
170
171         skc->sc_crypt = cfs_crypto_crypt_alg(sk_crypt);
172         if (skc->sc_crypt == CFS_CRYPT_ALG_UNKNOWN) {
173                 CERROR("Invalid crypt type: %s\n", sk_crypt);
174                 return -1;
175         }
176
177         /* 4. expiration time */
178         if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
179                 CERROR("Failed to read context expiration time\n");
180                 return -1;
181         }
182         skc->sc_expire = tmp + ktime_get_real_seconds();
183
184         /* 5. host random is used as nonce for encryption */
185         if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
186                           sizeof(skc->sc_host_random))) {
187                 CERROR("Failed to read host random\n");
188                 return -1;
189         }
190
191         /* 6. peer random is used as nonce for decryption */
192         if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
193                           sizeof(skc->sc_peer_random))) {
194                 CERROR("Failed to read peer random\n");
195                 return -1;
196         }
197
198         /* 7. HMAC key */
199         if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
200                 CERROR("Failed to read HMAC key\n");
201                 return -1;
202         }
203         if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
204                 CERROR("HMAC key must be larger than %d bytes\n",
205                        SK_MIN_SIZE);
206                 return -1;
207         }
208
209         /* 8. Session key, can be empty if not using privacy mode */
210         if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
211                 CERROR("Failed to read session key\n");
212                 return -1;
213         }
214
215         return 0;
216 }
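
/* Serialized context layout parsed above (sketch inferred from the
 * gss_get_bytes()/gss_get_rawobj() calls; the authoritative format is
 * produced by sk_serialize_kctx() on the userspace side):
 *
 *   u32     interface version        (must equal SK_INTERFACE_VERSION)
 *   char    hmac_name[CRYPTO_MAX_ALG_NAME]
 *   char    crypt_name[CRYPTO_MAX_ALG_NAME]
 *   u32     expiration delta (seconds)
 *   u32     host random  (nonce for encryption)
 *   u32     peer random  (nonce for decryption)
 *   rawobj  HMAC key     (length-prefixed, more than SK_MIN_SIZE bytes)
 *   rawobj  session key  (length-prefixed, empty unless privacy mode) */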
217
218 static void sk_delete_context(struct sk_ctx *skc)
219 {
220         if (!skc)
221                 return;
222
223         rawobj_free(&skc->sc_hmac_key);
224         gss_keyblock_free(&skc->sc_session_kb);
225         OBD_FREE_PTR(skc);
226 }
227
228 static
229 __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
230 {
231         struct sk_ctx *skc;
232         bool privacy = false;
233
234         if (inbuf == NULL || inbuf->data == NULL)
235                 return GSS_S_FAILURE;
236
237         OBD_ALLOC_PTR(skc);
238         if (!skc)
239                 return GSS_S_FAILURE;
240
241         atomic64_set(&skc->sc_iv, 0);
242
243         if (sk_fill_context(inbuf, skc))
244                 goto out_err;
245
246         /* Only privacy mode needs to initialize keys */
247         if (skc->sc_session_kb.kb_key.len > 0) {
248                 privacy = true;
249                 if (gss_keyblock_init(&skc->sc_session_kb,
250                                       cfs_crypto_crypt_name(skc->sc_crypt), 0))
251                         goto out_err;
252         }
253
254         gss_context->internal_ctx_id = skc;
255         CDEBUG(D_SEC, "successfully imported sk%s context\n",
256                privacy ? " (with privacy)" : "");
257
258         return GSS_S_COMPLETE;
259
260 out_err:
261         sk_delete_context(skc);
262         return GSS_S_FAILURE;
263 }
264
265 static
266 __u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
267                                   struct gss_ctx *gss_context_new)
268 {
269         struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
270         struct sk_ctx *skc_new;
271
272         OBD_ALLOC_PTR(skc_new);
273         if (!skc_new)
274                 return GSS_S_FAILURE;
275
276         skc_new->sc_hmac = skc_old->sc_hmac;
277         skc_new->sc_crypt = skc_old->sc_crypt;
278         skc_new->sc_expire = skc_old->sc_expire;
279         skc_new->sc_host_random = skc_old->sc_host_random;
280         skc_new->sc_peer_random = skc_old->sc_peer_random;
281
282         atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);
283
284         if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))
285                 goto out_err;
286         if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
287                 goto out_err;
288
289         /* Only privacy mode needs to initialize keys */
290         if (skc_new->sc_session_kb.kb_key.len > 0)
291                 if (gss_keyblock_init(&skc_new->sc_session_kb,
292                                       cfs_crypto_crypt_name(skc_new->sc_crypt),
293                                       0))
294                         goto out_err;
295
296         gss_context_new->internal_ctx_id = skc_new;
297         CDEBUG(D_SEC, "successfully copied reverse sk context\n");
298
299         return GSS_S_COMPLETE;
300
301 out_err:
302         sk_delete_context(skc_new);
303         return GSS_S_FAILURE;
304 }
305
306 static
307 __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
308                              time64_t *endtime)
309 {
310         struct sk_ctx *skc = gss_context->internal_ctx_id;
311
312         *endtime = skc->sc_expire;
313         return GSS_S_COMPLETE;
314 }
315
316 static
317 u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
318                  rawobj_t *msgs, int iov_count, struct bio_vec *iovs,
319                  rawobj_t *token, digest_hash hash_func)
320 {
321         struct ahash_request *req;
322         int rc2, rc;
323
324         req = cfs_crypto_hash_init(algo, key->data, key->len);
325         if (IS_ERR(req)) {
326                 rc = PTR_ERR(req);
327                 goto out_init_failed;
328         }
329
330
331         if (hash_func)
332                 rc2 = hash_func(req, NULL, msg_count, msgs, iov_count,
333                                 iovs);
334         else
335                 rc2 = gss_digest_hash(req, NULL, msg_count, msgs, iov_count,
336                                       iovs);
337
338         rc = cfs_crypto_hash_final(req, token->data, &token->len);
339         if (!rc && rc2)
340                 rc = rc2;
341 out_init_failed:
342         return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
343 }
344
345 static
346 __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
347                      int message_count,
348                      rawobj_t *messages,
349                      int iov_count,
350                      struct bio_vec *iovs,
351                      rawobj_t *token)
352 {
353         struct sk_ctx *skc = gss_context->internal_ctx_id;
354
355         return sk_make_hmac(skc->sc_hmac,
356                             &skc->sc_hmac_key, message_count, messages,
357                             iov_count, iovs, token, gss_context->hash_func);
358 }
359
360 static
361 u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
362                    int message_count, rawobj_t *messages,
363                    int iov_count, struct bio_vec *iovs,
364                    rawobj_t *token, digest_hash hash_func)
365 {
366         rawobj_t checksum = RAWOBJ_EMPTY;
367         __u32 rc = GSS_S_FAILURE;
368
369         checksum.len = cfs_crypto_hash_digestsize(algo);
370         if (token->len < checksum.len) {
371                 CDEBUG(D_SEC, "Token received too short, expected %d "
372                        "received %d\n", checksum.len, token->len);
373                 return GSS_S_DEFECTIVE_TOKEN;
374         }
375
376         OBD_ALLOC_LARGE(checksum.data, checksum.len);
377         if (!checksum.data)
378                 return rc;
379
380         if (sk_make_hmac(algo, key, message_count,
381                          messages, iov_count, iovs, &checksum,
382                          hash_func)) {
383                 CDEBUG(D_SEC, "Failed to create checksum to validate\n");
384                 goto cleanup;
385         }
386
387         if (memcmp(token->data, checksum.data, checksum.len)) {
388                 CERROR("checksum mismatch\n");
389                 rc = GSS_S_BAD_SIG;
390                 goto cleanup;
391         }
392
393         rc = GSS_S_COMPLETE;
394
395 cleanup:
396         OBD_FREE(checksum.data, checksum.len);
397         return rc;
398 }
399
400 /* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
401  * encrypted pages in the bulk descriptor are populated, although we only need
402  * to hash up to the number of bytes actually specified by the sender
403  * (bd_nob); otherwise the calculated HMAC will be incorrect. */
404 static
405 u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
406                         int msgcnt, rawobj_t *msgs, int iovcnt,
407                         struct bio_vec *iovs, int iov_bytes, rawobj_t *token)
408 {
409         rawobj_t checksum = RAWOBJ_EMPTY;
410         struct ahash_request *req;
411         struct scatterlist sg[1];
412         int rc = 0;
413         struct sg_table sgt;
414         int bytes;
415         int i;
416
417         checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
418         if (token->len < checksum.len) {
419                 CDEBUG(D_SEC, "Token received too short, expected %d "
420                        "received %d\n", checksum.len, token->len);
421                 return GSS_S_DEFECTIVE_TOKEN;
422         }
423
424         OBD_ALLOC_LARGE(checksum.data, checksum.len);
425         if (!checksum.data)
426                 return GSS_S_FAILURE;
427
428         req = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
429         if (IS_ERR(req)) {
430                 rc = GSS_S_FAILURE;
431                 goto cleanup;
432         }
433
434         for (i = 0; i < msgcnt; i++) {
435                 if (!msgs[i].len)
436                         continue;
437
438                 rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
439                 if (rc != 0)
440                         goto hash_cleanup;
441
442                 ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
443                 rc = crypto_ahash_update(req);
444                 if (rc) {
445                         gss_teardown_sgtable(&sgt);
446                         goto hash_cleanup;
447                 }
448
449                 gss_teardown_sgtable(&sgt);
450         }
451
452         for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
453                 if (iovs[i].bv_len == 0)
454                         continue;
455
456                 bytes = min_t(int, iov_bytes, iovs[i].bv_len);
457                 iov_bytes -= bytes;
458
459                 sg_init_table(sg, 1);
460                 sg_set_page(&sg[0], iovs[i].bv_page, bytes,
461                             iovs[i].bv_offset);
462                 ahash_request_set_crypt(req, sg, NULL, bytes);
463                 rc = crypto_ahash_update(req);
464                 if (rc)
465                         goto hash_cleanup;
466         }
467
468 hash_cleanup:
469         cfs_crypto_hash_final(req, checksum.data, &checksum.len);
470         if (rc)
471                 goto cleanup;
472
473         if (memcmp(token->data, checksum.data, checksum.len))
474                 rc = GSS_S_BAD_SIG;
475         else
476                 rc = GSS_S_COMPLETE;
477
478 cleanup:
479         OBD_FREE_LARGE(checksum.data, checksum.len);
480
481         return rc;
482 }
483
484 static
485 __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
486                         int message_count,
487                         rawobj_t *messages,
488                         int iov_count,
489                         struct bio_vec *iovs,
490                         rawobj_t *token)
491 {
492         struct sk_ctx *skc = gss_context->internal_ctx_id;
493
494         return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
495                               message_count, messages, iov_count, iovs, token,
496                               gss_context->hash_func);
497 }
498
499 static
500 __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
501                     rawobj_t *message, int message_buffer_length,
502                     rawobj_t *token)
503 {
504         struct sk_ctx *skc = gss_context->internal_ctx_id;
505         size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
506         struct sk_wire skw;
507         struct sk_hdr skh;
508         rawobj_t msgbufs[3];
509         __u8 local_iv[SK_IV_SIZE];
510         unsigned int blocksize;
511
512         LASSERT(skc->sc_session_kb.kb_tfm);
513
514         blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
515         if (gss_add_padding(message, message_buffer_length, blocksize))
516                 return GSS_S_FAILURE;
517
518         memset(token->data, 0, token->len);
519
520         if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
521                 return GSS_S_FAILURE;
522
523         skw.skw_header.data = token->data;
524         skw.skw_header.len = sizeof(skh);
525         memcpy(skw.skw_header.data, &skh, sizeof(skh));
526
527         sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
528         skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
529         skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
530         if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
531                               &skw.skw_cipher, 1))
532                 return GSS_S_FAILURE;
533
534         /* HMAC covers the SK header, GSS header, and ciphertext */
535         msgbufs[0] = skw.skw_header;
536         msgbufs[1] = *gss_header;
537         msgbufs[2] = skw.skw_cipher;
538
539         skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
540         skw.skw_hmac.len = sht_bytes;
541         if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key,
542                          3, msgbufs, 0, NULL, &skw.skw_hmac,
543                          gss_context->hash_func))
544                 return GSS_S_FAILURE;
545
546         token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
547
548         return GSS_S_COMPLETE;
549 }
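
/*
 * Sizing sketch (illustration only, not used by the code above): the token
 * buffer needed by gss_wrap_sk() is the SK header, plus the message rounded
 * up to the cipher block size, plus the HMAC digest.
 * sk_example_wrap_token_len() is a hypothetical helper added for clarity.
 */
static inline unsigned int sk_example_wrap_token_len(struct sk_ctx *skc,
                                                     unsigned int msg_len)
{
        unsigned int blocksize =
                crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);

        return sizeof(struct sk_hdr) + sk_block_mask(msg_len, blocksize) +
               cfs_crypto_hash_digestsize(skc->sc_hmac);
}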
550
551 static
552 __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
553                       rawobj_t *token, rawobj_t *message)
554 {
555         struct sk_ctx *skc = gss_context->internal_ctx_id;
556         size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
557         struct sk_wire skw;
558         struct sk_hdr *skh;
559         rawobj_t msgbufs[3];
560         __u8 local_iv[SK_IV_SIZE];
561         unsigned int blocksize;
562         int rc;
563
564         LASSERT(skc->sc_session_kb.kb_tfm);
565
566         if (token->len < sizeof(*skh) + sht_bytes)
567                 return GSS_S_DEFECTIVE_TOKEN;
568
569         skw.skw_header.data = token->data;
570         skw.skw_header.len = sizeof(struct sk_hdr);
571         skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
572         skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
573         skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
574         skw.skw_hmac.len = sht_bytes;
575
576         blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
577         if (skw.skw_cipher.len % blocksize != 0)
578                 return GSS_S_DEFECTIVE_TOKEN;
579
580         skh = (struct sk_hdr *)skw.skw_header.data;
581         rc = sk_verify_header(skh);
582         if (rc != GSS_S_COMPLETE)
583                 return rc;
584
585         /* HMAC covers the SK header, GSS header, and ciphertext */
586         msgbufs[0] = skw.skw_header;
587         msgbufs[1] = *gss_header;
588         msgbufs[2] = skw.skw_cipher;
589         rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
590                             0, NULL, &skw.skw_hmac, gss_context->hash_func);
591         if (rc)
592                 return rc;
593
594         sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
595         message->len = skw.skw_cipher.len;
596         if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
597                               1, &skw.skw_cipher, message, 0))
598                 return GSS_S_FAILURE;
599
600         return GSS_S_COMPLETE;
601 }
602
603 static
604 __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
605                        struct ptlrpc_bulk_desc *desc)
606 {
607         struct sk_ctx *skc = gss_context->internal_ctx_id;
608         int blocksize;
609         int i;
610
611         LASSERT(skc->sc_session_kb.kb_tfm);
612         blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
613
614         for (i = 0; i < desc->bd_iov_count; i++) {
615                 if (desc->bd_vec[i].bv_offset & (blocksize - 1)) {
616                         CERROR("offset %d not blocksize aligned\n",
617                                desc->bd_vec[i].bv_offset);
618                         return GSS_S_FAILURE;
619                 }
620
621                 desc->bd_enc_vec[i].bv_offset =
622                         desc->bd_vec[i].bv_offset;
623                 desc->bd_enc_vec[i].bv_len =
624                         sk_block_mask(desc->bd_vec[i].bv_len, blocksize);
625         }
626
627         return GSS_S_COMPLETE;
628 }
629
630 static __u32 sk_encrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv,
631                              struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
632                              int adj_nob)
633 {
634         struct scatterlist ptxt;
635         struct scatterlist ctxt;
636         int blocksize;
637         int i;
638         int rc;
639         int nob = 0;
640         SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
641
642         blocksize = crypto_sync_skcipher_blocksize(tfm);
643
644         sg_init_table(&ptxt, 1);
645         sg_init_table(&ctxt, 1);
646
647         skcipher_request_set_sync_tfm(req, tfm);
648         skcipher_request_set_callback(req, 0, NULL, NULL);
649
650         for (i = 0; i < desc->bd_iov_count; i++) {
651                 sg_set_page(&ptxt, desc->bd_vec[i].bv_page,
652                             sk_block_mask(desc->bd_vec[i].bv_len,
653                                           blocksize),
654                             desc->bd_vec[i].bv_offset);
655                 nob += ptxt.length;
656
657                 sg_set_page(&ctxt, desc->bd_enc_vec[i].bv_page,
658                             ptxt.length, ptxt.offset);
659
660                 desc->bd_enc_vec[i].bv_offset = ctxt.offset;
661                 desc->bd_enc_vec[i].bv_len = ctxt.length;
662
663                 skcipher_request_set_crypt(req, &ptxt, &ctxt, ptxt.length, iv);
664                 rc = crypto_skcipher_encrypt_iv(req, &ctxt, &ptxt, ptxt.length);
665                 if (rc) {
666                         CERROR("failed to encrypt page: %d\n", rc);
667                         skcipher_request_zero(req);
668                         return rc;
669                 }
670         }
671         skcipher_request_zero(req);
672
673         if (adj_nob)
674                 desc->bd_nob = nob;
675
676         return 0;
677 }
678
679 static __u32 sk_decrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv,
680                              struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
681                              int adj_nob)
682 {
683         struct scatterlist ptxt;
684         struct scatterlist ctxt;
685         int blocksize;
686         int i;
687         int rc;
688         int pnob = 0;
689         int cnob = 0;
690         SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
691
692         sg_init_table(&ptxt, 1);
693         sg_init_table(&ctxt, 1);
694
695         blocksize = crypto_sync_skcipher_blocksize(tfm);
696         if (desc->bd_nob_transferred % blocksize != 0) {
697                 CERROR("Transfer not a multiple of block size: %d\n",
698                        desc->bd_nob_transferred);
699                 return GSS_S_DEFECTIVE_TOKEN;
700         }
701
702         skcipher_request_set_sync_tfm(req, tfm);
703         skcipher_request_set_callback(req, 0, NULL, NULL);
704
705         for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
706              i++) {
707                 struct bio_vec *piov = &desc->bd_vec[i];
708                 struct bio_vec *ciov = &desc->bd_enc_vec[i];
709
710                 if (ciov->bv_offset % blocksize != 0 ||
711                     ciov->bv_len % blocksize != 0) {
712                         CERROR("Invalid bulk descriptor vector\n");
713                         skcipher_request_zero(req);
714                         return GSS_S_DEFECTIVE_TOKEN;
715                 }
716
717                 /* Must adjust bytes here because we know the actual sizes after
718                  * decryption.  Similar to what gss_cli_ctx_unwrap_bulk does for
719                  * integrity only mode */
720                 if (adj_nob) {
721                         /* cipher text must not exceed transferred size */
722                         if (ciov->bv_len + cnob > desc->bd_nob_transferred)
723                                 ciov->bv_len =
724                                         desc->bd_nob_transferred - cnob;
725
726                         piov->bv_len = ciov->bv_len;
727
728                         /* plain text must not exceed bulk's size */
729                         if (ciov->bv_len + pnob > desc->bd_nob)
730                                 piov->bv_len = desc->bd_nob - pnob;
731                 } else {
732                         /* Checks taken from krb5_decrypt, since it has not
733                          * been verified whether LNET guarantees these */
734                         if (ciov->bv_len + cnob > desc->bd_nob_transferred ||
735                             piov->bv_len > ciov->bv_len) {
736                                 CERROR("Invalid decrypted length\n");
737                                 skcipher_request_zero(req);
738                                 return GSS_S_FAILURE;
739                         }
740                 }
741
742                 if (ciov->bv_len == 0)
743                         continue;
744
745                 sg_init_table(&ctxt, 1);
746                 sg_set_page(&ctxt, ciov->bv_page, ciov->bv_len,
747                             ciov->bv_offset);
748                 ptxt = ctxt;
749
750                 /* In the event the plain text size is not a multiple
751                  * of blocksize we decrypt in place and copy the result
752                  * after the decryption */
753                 if (piov->bv_len % blocksize == 0)
754                         sg_assign_page(&ptxt, piov->bv_page);
755
756                 skcipher_request_set_crypt(req, &ctxt, &ptxt, ptxt.length, iv);
757                 rc = crypto_skcipher_decrypt_iv(req, &ptxt, &ctxt, ptxt.length);
758                 if (rc) {
759                         CERROR("Decryption failed for page: %d\n", rc);
760                         skcipher_request_zero(req);
761                         return GSS_S_FAILURE;
762                 }
763
764                 if (piov->bv_len % blocksize != 0) {
765                         memcpy(page_address(piov->bv_page) +
766                                piov->bv_offset,
767                                page_address(ciov->bv_page) +
768                                ciov->bv_offset,
769                                piov->bv_len);
770                 }
771
772                 cnob += ciov->bv_len;
773                 pnob += piov->bv_len;
774         }
775         skcipher_request_zero(req);
776
777         /* if needed, clear out the remaining unused iovs */
778         if (adj_nob)
779                 while (i < desc->bd_iov_count)
780                         desc->bd_vec[i++].bv_len = 0;
781
782         if (unlikely(cnob != desc->bd_nob_transferred)) {
783                 CERROR("%d cipher text transferred but only %d decrypted\n",
784                        desc->bd_nob_transferred, cnob);
785                 return GSS_S_FAILURE;
786         }
787
788         if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
789                 CERROR("%d plain text expected but only %d received\n",
790                        desc->bd_nob, pnob);
791                 return GSS_S_FAILURE;
792         }
793
794         return 0;
795 }
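
/* Worked example of the adj_nob clipping above (illustrative numbers only):
 * a 6007-byte bulk (bd_nob) carried in two pages with a 16-byte block cipher
 * has its second fragment padded from 1911 to 1920 bytes by the sender, so
 * bd_nob_transferred = 4096 + 1920 = 6016.  During decryption cnob reaches
 * 6016 while the plaintext lengths are clipped to 4096 and 6007 - 4096 = 1911,
 * giving pnob = bd_nob = 6007. */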
796
797 static
798 __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
799                        struct ptlrpc_bulk_desc *desc, rawobj_t *token,
800                        int adj_nob)
801 {
802         struct sk_ctx *skc = gss_context->internal_ctx_id;
803         size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
804         struct sk_wire skw;
805         struct sk_hdr skh;
806         __u8 local_iv[SK_IV_SIZE];
807
808         LASSERT(skc->sc_session_kb.kb_tfm);
809
810         memset(token->data, 0, token->len);
811         if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
812                 return GSS_S_FAILURE;
813
814         skw.skw_header.data = token->data;
815         skw.skw_header.len = sizeof(skh);
816         memcpy(skw.skw_header.data, &skh, sizeof(skh));
817
818         sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
819         skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
820         skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
821         if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
822                             desc, &skw.skw_cipher, adj_nob))
823                 return GSS_S_FAILURE;
824
825         skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
826         skw.skw_hmac.len = sht_bytes;
827         if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
828                          desc->bd_iov_count, desc->bd_enc_vec, &skw.skw_hmac,
829                          gss_context->hash_func))
830                 return GSS_S_FAILURE;
831
832         return GSS_S_COMPLETE;
833 }
834
835 static
836 __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
837                            struct ptlrpc_bulk_desc *desc,
838                            rawobj_t *token, int adj_nob)
839 {
840         struct sk_ctx *skc = gss_context->internal_ctx_id;
841         size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
842         struct sk_wire skw;
843         struct sk_hdr *skh;
844         __u8 local_iv[SK_IV_SIZE];
845         int rc;
846
847         LASSERT(skc->sc_session_kb.kb_tfm);
848
849         if (token->len < sizeof(*skh) + sht_bytes)
850                 return GSS_S_DEFECTIVE_TOKEN;
851
852         skw.skw_header.data = token->data;
853         skw.skw_header.len = sizeof(struct sk_hdr);
854         skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
855         skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
856         skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
857         skw.skw_hmac.len = sht_bytes;
858
859         skh = (struct sk_hdr *)skw.skw_header.data;
860         rc = sk_verify_header(skh);
861         if (rc != GSS_S_COMPLETE)
862                 return rc;
863
864         rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
865                                  &skw.skw_cipher, desc->bd_iov_count,
866                                  desc->bd_enc_vec, desc->bd_nob,
867                                  &skw.skw_hmac);
868         if (rc)
869                 return rc;
870
871         sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
872         rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
873                              desc, &skw.skw_cipher, adj_nob);
874         if (rc)
875                 return rc;
876
877         return GSS_S_COMPLETE;
878 }
879
880 static
881 void gss_delete_sec_context_sk(void *internal_context)
882 {
883         struct sk_ctx *sk_context = internal_context;
884         sk_delete_context(sk_context);
885 }
886
887 int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
888 {
889         return snprintf(buf, bufsize, "sk");
890 }
891
892 static struct gss_api_ops gss_sk_ops = {
893         .gss_import_sec_context     = gss_import_sec_context_sk,
894         .gss_copy_reverse_context   = gss_copy_reverse_context_sk,
895         .gss_inquire_context        = gss_inquire_context_sk,
896         .gss_get_mic                = gss_get_mic_sk,
897         .gss_verify_mic             = gss_verify_mic_sk,
898         .gss_wrap                   = gss_wrap_sk,
899         .gss_unwrap                 = gss_unwrap_sk,
900         .gss_prep_bulk              = gss_prep_bulk_sk,
901         .gss_wrap_bulk              = gss_wrap_bulk_sk,
902         .gss_unwrap_bulk            = gss_unwrap_bulk_sk,
903         .gss_delete_sec_context     = gss_delete_sec_context_sk,
904         .gss_display                = gss_display_sk,
905 };
906
907 static struct subflavor_desc gss_sk_sfs[] = {
908         {
909                 .sf_subflavor   = SPTLRPC_SUBFLVR_SKN,
910                 .sf_qop         = 0,
911                 .sf_service     = SPTLRPC_SVC_NULL,
912                 .sf_name        = "skn"
913         },
914         {
915                 .sf_subflavor   = SPTLRPC_SUBFLVR_SKA,
916                 .sf_qop         = 0,
917                 .sf_service     = SPTLRPC_SVC_AUTH,
918                 .sf_name        = "ska"
919         },
920         {
921                 .sf_subflavor   = SPTLRPC_SUBFLVR_SKI,
922                 .sf_qop         = 0,
923                 .sf_service     = SPTLRPC_SVC_INTG,
924                 .sf_name        = "ski"
925         },
926         {
927                 .sf_subflavor   = SPTLRPC_SUBFLVR_SKPI,
928                 .sf_qop         = 0,
929                 .sf_service     = SPTLRPC_SVC_PRIV,
930                 .sf_name        = "skpi"
931         },
932 };
933
934 static struct gss_api_mech gss_sk_mech = {
935         /* .gm_owner uses default NULL value for THIS_MODULE */
936         .gm_name        = "sk",
937         .gm_oid         = (rawobj_t) {
938                 .len = 12,
939                 .data = "\053\006\001\004\001\311\146\215\126\001\000\001",
940         },
941         .gm_ops         = &gss_sk_ops,
942         .gm_sf_num      = 4,
943         .gm_sfs         = gss_sk_sfs,
944 };
945
946 int __init init_sk_module(void)
947 {
948         int status;
949
950         status = lgss_mech_register(&gss_sk_mech);
951         if (status)
952                 CERROR("Failed to register sk gss mechanism!\n");
953
954         return status;
955 }
956
957 void cleanup_sk_module(void)
958 {
959         lgss_mech_unregister(&gss_sk_mech);
960 }