lustre/ptlrpc/gss/gss_sk_mech.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (C) 2013, 2015, Trustees of Indiana University
 *
 * Copyright (c) 2014, 2016, Intel Corporation.
 *
 * Author: Jeremy Filizetti <jfilizet@iu.edu>
 * Author: Andrew Korty <ajk@iu.edu>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#include <crypto/ctr.h>

#include <libcfs/libcfs_crypto.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_user.h>

#include "gss_err.h"
#include "gss_crypto.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"

#define SK_INTERFACE_VERSION 1
#define SK_MSG_VERSION 1
#define SK_MIN_SIZE 8
#define SK_IV_SIZE 16

/* Starting number for reverse contexts.  It is critical to security
 * that reverse contexts use a different range of numbers than regular
 * contexts because they use the same key.  Therefore the IV/nonce
 * combination must be unique for them.  To accomplish this, reverse contexts
 * use the negative range of a 64-bit number and regular contexts use the
 * positive range.  If the same IV/nonce combination were reused it would leak
 * information about the plaintext. */
#define SK_IV_REV_START (1ULL << 63)

struct sk_ctx {
        __u16                   sc_pad;
        __u16                   sc_crypt;
        __u32                   sc_expire;
        __u32                   sc_host_random;
        __u32                   sc_peer_random;
        atomic64_t              sc_iv;
        rawobj_t                sc_hmac_key;
        struct gss_keyblock     sc_session_kb;
        enum cfs_crypto_hash_alg sc_hmac;
};

struct sk_hdr {
        __u64                   skh_version;
        __u64                   skh_iv;
} __attribute__((packed));

/* The format of SK wire data is similar to that of RFC3686 ESP Payload
 * (section 3) except instead of just an IV there is a struct sk_hdr.
 * ---------------------------------------------------------------------
 * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
 * --------------------------------------------------------------------- */
struct sk_wire {
        rawobj_t                skw_header;
        rawobj_t                skw_cipher;
        rawobj_t                skw_hmac;
};

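/* Session ciphers supported by the shared key mechanism, indexed by the
 * SK_CRYPT_* value supplied by userspace in the imported context */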
static struct sk_crypt_type sk_crypt_types[] = {
        [SK_CRYPT_AES256_CTR] = {
                .cht_name = "aes256",
                .cht_key = 0,
                .cht_bytes = 32,
        },
};

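/* Round len up to the next multiple of blocksize.  The mask arithmetic
 * assumes blocksize is a power of two, e.g. sk_block_mask(20, 16) == 32. */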
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
        return (len + blocksize - 1) & (~(blocksize - 1));
}

static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
{
        __u64 tmp_iv;

        skh->skh_version = cpu_to_be64(SK_MSG_VERSION);

        /* Always use inc_return so we never hand out the initial counter
         * values (0 and SK_IV_REV_START), which are reserved for detecting
         * counter wrap */
        tmp_iv = atomic64_inc_return(&skc->sc_iv);
        skh->skh_iv = cpu_to_be64(tmp_iv);
        if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
                CERROR("Counter looped, connection must be reset to avoid "
                       "leaking plaintext information\n");
                return GSS_S_FAILURE;
        }

        return GSS_S_COMPLETE;
}

static int sk_verify_header(struct sk_hdr *skh)
{
        if (be64_to_cpu(skh->skh_version) != SK_MSG_VERSION)
                return GSS_S_DEFECTIVE_TOKEN;

        return GSS_S_COMPLETE;
}

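/* Build the SK_IV_SIZE-byte RFC 3686 counter-mode IV from its three parts:
 * a 4-byte nonce (the context's host or peer random), the 8-byte per-message
 * IV carried in struct sk_hdr, and a 4-byte big-endian block counter
 * starting at 1 */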
void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
{
        __u32 ctr = cpu_to_be32(1);

        memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
        iv += CTR_RFC3686_NONCE_SIZE;
        memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
        iv += CTR_RFC3686_IV_SIZE;
        memcpy(iv, &ctr, sizeof(ctr));
}

static int sk_init_keys(struct sk_ctx *skc)
{
        return gss_keyblock_init(&skc->sc_session_kb,
                                 sk_crypt_types[skc->sc_crypt].cht_name, 0);
}

static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
        char *ptr = inbuf->data;
        char *end = inbuf->data + inbuf->len;
        __u32 tmp;

        /* see sk_serialize_kctx() for format from userspace side */
        /* 1. Version */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read shared key interface version\n");
                return -1;
        }
        if (tmp != SK_INTERFACE_VERSION) {
                CERROR("Invalid shared key interface version: %d\n", tmp);
                return -1;
        }

        /* 2. HMAC type */
        if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
                CERROR("Failed to read HMAC algorithm type\n");
                return -1;
        }
        if (skc->sc_hmac >= CFS_HASH_ALG_MAX) {
                CERROR("Invalid HMAC type: %d\n", skc->sc_hmac);
                return -1;
        }

        /* 3. crypt type */
        if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
                CERROR("Failed to read crypt algorithm type\n");
                return -1;
        }
        if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) {
                CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
                return -1;
        }

        /* 4. expiration time */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read context expiration time\n");
                return -1;
        }
        skc->sc_expire = tmp + cfs_time_current_sec();

        /* 5. host random is used as nonce for encryption */
        if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
                          sizeof(skc->sc_host_random))) {
                CERROR("Failed to read host random\n");
                return -1;
        }

        /* 6. peer random is used as nonce for decryption */
        if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
                          sizeof(skc->sc_peer_random))) {
                CERROR("Failed to read peer random\n");
                return -1;
        }

        /* 7. HMAC key */
        if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
                CERROR("Failed to read HMAC key\n");
                return -1;
        }
        if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
                CERROR("HMAC key must be larger than %d bytes\n",
                       SK_MIN_SIZE);
                return -1;
        }

        /* 8. Session key, can be empty if not using privacy mode */
        if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
                CERROR("Failed to read session key\n");
                return -1;
        }

        return 0;
}

static void sk_delete_context(struct sk_ctx *skc)
{
        if (!skc)
                return;

        rawobj_free(&skc->sc_hmac_key);
        gss_keyblock_free(&skc->sc_session_kb);
        OBD_FREE_PTR(skc);
}

static
__u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
{
        struct sk_ctx *skc;
        bool privacy = false;

        if (inbuf == NULL || inbuf->data == NULL)
                return GSS_S_FAILURE;

        OBD_ALLOC_PTR(skc);
        if (!skc)
                return GSS_S_FAILURE;

        atomic64_set(&skc->sc_iv, 0);

        if (sk_fill_context(inbuf, skc))
                goto out_err;

        /* Only privacy mode needs to initialize keys */
        if (skc->sc_session_kb.kb_key.len > 0) {
                privacy = true;
                if (sk_init_keys(skc))
                        goto out_err;
        }

        gss_context->internal_ctx_id = skc;
        CDEBUG(D_SEC, "successfully imported sk%s context\n",
               privacy ? "pi" : "i");

        return GSS_S_COMPLETE;

out_err:
        sk_delete_context(skc);
        return GSS_S_FAILURE;
}

static
__u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
                                  struct gss_ctx *gss_context_new)
{
        struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
        struct sk_ctx *skc_new;

        OBD_ALLOC_PTR(skc_new);
        if (!skc_new)
                return GSS_S_FAILURE;

        skc_new->sc_hmac = skc_old->sc_hmac;
        skc_new->sc_crypt = skc_old->sc_crypt;
        skc_new->sc_expire = skc_old->sc_expire;
        skc_new->sc_host_random = skc_old->sc_host_random;
        skc_new->sc_peer_random = skc_old->sc_peer_random;

        atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);

        if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))
                goto out_err;
        if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
                goto out_err;

        /* Only privacy mode needs to initialize keys */
        if (skc_new->sc_session_kb.kb_key.len > 0)
                if (sk_init_keys(skc_new))
                        goto out_err;

        gss_context_new->internal_ctx_id = skc_new;
        CDEBUG(D_SEC, "successfully copied reverse sk context\n");

        return GSS_S_COMPLETE;

out_err:
        sk_delete_context(skc_new);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
                             unsigned long *endtime)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        *endtime = skc->sc_expire;
        return GSS_S_COMPLETE;
}

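/* Compute an HMAC with the named hash algorithm, keyed by 'key', over the
 * msg_count rawobj buffers and the iov_count kiovs, writing the digest into
 * 'token' (which must be at least the digest size) */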
static
__u32 sk_make_hmac(const char *alg_name, rawobj_t *key, int msg_count,
                   rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
                   rawobj_t *token)
{
        struct crypto_ahash *tfm;
        int rc;

        tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return GSS_S_FAILURE;

        rc = GSS_S_FAILURE;
        LASSERT(token->len >= crypto_ahash_digestsize(tfm));
        if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
                             token))
                rc = GSS_S_COMPLETE;

        crypto_free_ahash(tfm);
        return rc;
}

static
__u32 gss_get_mic_sk(struct gss_ctx *gss_context,
                     int message_count,
                     rawobj_t *messages,
                     int iov_count,
                     lnet_kiov_t *iovs,
                     rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac),
                            &skc->sc_hmac_key, message_count, messages,
                            iov_count, iovs, token);
}

static
u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
                   int message_count, rawobj_t *messages, int iov_count,
                   lnet_kiov_t *iovs, rawobj_t *token)
{
        rawobj_t checksum = RAWOBJ_EMPTY;
        __u32 rc = GSS_S_FAILURE;

        checksum.len = cfs_crypto_hash_digestsize(algo);
        /* What about checksum.len == 0 ??? */

        if (token->len < checksum.len) {
                CDEBUG(D_SEC, "Token received too short, expected %d "
                       "received %d\n", checksum.len, token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        OBD_ALLOC_LARGE(checksum.data, checksum.len);
        if (!checksum.data)
                return rc;

        if (sk_make_hmac(cfs_crypto_hash_name(algo), key, message_count,
                         messages, iov_count, iovs, &checksum)) {
                CDEBUG(D_SEC, "Failed to create checksum to validate\n");
                goto cleanup;
        }

        if (memcmp(token->data, checksum.data, checksum.len)) {
                CERROR("checksum mismatch\n");
                rc = GSS_S_BAD_SIG;
                goto cleanup;
        }

        rc = GSS_S_COMPLETE;

cleanup:
        OBD_FREE_LARGE(checksum.data, checksum.len);
        return rc;
}

/* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
 * encrypted pages in the bulk descriptor are populated, although we only need
 * to hash up to the number of bytes actually specified by the sender
 * (bd_nob), otherwise the calculated HMAC will be incorrect. */
static
__u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac,
                          rawobj_t *key, int msgcnt, rawobj_t *msgs,
                          int iovcnt, lnet_kiov_t *iovs, int iov_bytes,
                          rawobj_t *token)
{
        rawobj_t checksum = RAWOBJ_EMPTY;
        struct cfs_crypto_hash_desc *hdesc;
        int rc = GSS_S_FAILURE, i;

        checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
        if (token->len < checksum.len) {
                CDEBUG(D_SEC, "Token received too short, expected %d "
                       "received %d\n", checksum.len, token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        OBD_ALLOC_LARGE(checksum.data, checksum.len);
        if (!checksum.data)
                return rc;

        for (i = 0; i < msgcnt; i++) {
                if (!msgs[i].len)
                        continue;

                rc = cfs_crypto_hash_digest(sc_hmac, msgs[i].data, msgs[i].len,
                                            key->data, key->len,
                                            checksum.data, &checksum.len);
                if (rc)
                        goto cleanup;
        }

        hdesc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
        if (IS_ERR(hdesc)) {
                rc = PTR_ERR(hdesc);
                goto cleanup;
        }

        for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
                int bytes;

                if (iovs[i].kiov_len == 0)
                        continue;

                bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
                iov_bytes -= bytes;
                rc = cfs_crypto_hash_update_page(hdesc, iovs[i].kiov_page,
                                                 iovs[i].kiov_offset, bytes);
                if (rc)
                        goto cleanup;
        }

        rc = cfs_crypto_hash_final(hdesc, checksum.data, &checksum.len);
        if (rc)
                goto cleanup;

        if (memcmp(token->data, checksum.data, checksum.len)) {
                rc = GSS_S_BAD_SIG;
                goto cleanup;
        }

        rc = GSS_S_COMPLETE;
cleanup:
        OBD_FREE_LARGE(checksum.data, checksum.len);

        return rc;
}

static
__u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
                        int message_count,
                        rawobj_t *messages,
                        int iov_count,
                        lnet_kiov_t *iovs,
                        rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
                              message_count, messages, iov_count, iovs, token);
}

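/* Wrap (encrypt) a message into the caller-provided token.  The token is
 * laid out as struct sk_hdr, the blocksize-padded ciphertext, then the HMAC,
 * which covers the SK header, the GSS header, and the ciphertext */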
static
__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                  rawobj_t *message, int message_buffer_length,
                  rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
        struct sk_wire skw;
        struct sk_hdr skh;
        rawobj_t msgbufs[3];
        __u8 local_iv[SK_IV_SIZE];
        unsigned int blocksize;

        LASSERT(skc->sc_session_kb.kb_tfm);

        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
        if (gss_add_padding(message, message_buffer_length, blocksize))
                return GSS_S_FAILURE;

        memset(token->data, 0, token->len);

        if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
                return GSS_S_FAILURE;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(skh);
        memcpy(skw.skw_header.data, &skh, sizeof(skh));

        sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
                              &skw.skw_cipher, 1))
                return GSS_S_FAILURE;

        /* HMAC covers the SK header, GSS header, and ciphertext */
        msgbufs[0] = skw.skw_header;
        msgbufs[1] = *gss_header;
        msgbufs[2] = skw.skw_cipher;

        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;
        if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
                         3, msgbufs, 0, NULL, &skw.skw_hmac))
                return GSS_S_FAILURE;

        token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;

        return GSS_S_COMPLETE;
}

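/* Unwrap (decrypt) a token produced by gss_wrap_sk(): parse the SK header,
 * check the message version, verify the HMAC over the SK header, GSS header
 * and ciphertext, and only then decrypt the ciphertext into 'message' */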
static
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                    rawobj_t *token, rawobj_t *message)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
        struct sk_wire skw;
        struct sk_hdr *skh;
        rawobj_t msgbufs[3];
        __u8 local_iv[SK_IV_SIZE];
        unsigned int blocksize;
        int rc;

        LASSERT(skc->sc_session_kb.kb_tfm);

        if (token->len < sizeof(struct sk_hdr) + sht_bytes)
                return GSS_S_DEFECTIVE_TOKEN;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(struct sk_hdr);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;

        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
        if (skw.skw_cipher.len % blocksize != 0)
                return GSS_S_DEFECTIVE_TOKEN;

        skh = (struct sk_hdr *)skw.skw_header.data;
        rc = sk_verify_header(skh);
        if (rc != GSS_S_COMPLETE)
                return rc;

        /* HMAC covers the SK header, GSS header, and ciphertext */
        msgbufs[0] = skw.skw_header;
        msgbufs[1] = *gss_header;
        msgbufs[2] = skw.skw_cipher;
        rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
                            0, NULL, &skw.skw_hmac);
        if (rc)
                return rc;

        sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
        message->len = skw.skw_cipher.len;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
                              1, &skw.skw_cipher, message, 0))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}

static
__u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        int blocksize;
        int i;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("offset %d not blocksize aligned\n",
                               BD_GET_KIOV(desc, i).kiov_offset);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
        }

        return GSS_S_COMPLETE;
}

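/* Encrypt each plaintext kiov of the bulk descriptor into the matching
 * encryption kiov.  Lengths are rounded up to the cipher blocksize, and
 * when adj_nob is set bd_nob is updated to the padded total */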
static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                             struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = iv,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int nob = 0;

        blocksize = crypto_blkcipher_blocksize(tfm);

        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
                            sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
                                          blocksize),
                            BD_GET_KIOV(desc, i).kiov_offset);
                nob += ptxt.length;

                sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            ptxt.length, ptxt.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

                rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
                                                 ptxt.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}

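/* Decrypt each encryption kiov of the bulk descriptor back into the matching
 * plaintext kiov.  When adj_nob is set, kiov lengths are trimmed so that no
 * more than bd_nob_transferred ciphertext bytes and bd_nob plaintext bytes
 * are consumed; otherwise the lengths are only validated */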
static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
                             struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = iv,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int pnob = 0;
        int cnob = 0;

        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        blocksize = crypto_blkcipher_blocksize(tfm);
        if (desc->bd_nob_transferred % blocksize != 0) {
                CERROR("Transfer not a multiple of block size: %d\n",
                       desc->bd_nob_transferred);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
             i++) {
                lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
                lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

                if (ciov->kiov_offset % blocksize != 0 ||
                    ciov->kiov_len % blocksize != 0) {
                        CERROR("Invalid bulk descriptor vector\n");
                        return GSS_S_DEFECTIVE_TOKEN;
                }

                /* Must adjust bytes here because we know the actual sizes after
                 * decryption.  Similar to what gss_cli_ctx_unwrap_bulk does for
                 * integrity only mode */
                if (adj_nob) {
                        /* cipher text must not exceed transferred size */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
                                ciov->kiov_len =
                                        desc->bd_nob_transferred - cnob;

                        piov->kiov_len = ciov->kiov_len;

                        /* plain text must not exceed bulk's size */
                        if (ciov->kiov_len + pnob > desc->bd_nob)
                                piov->kiov_len = desc->bd_nob - pnob;
                } else {
                        /* Taken from krb5_decrypt since it was not verified
                         * whether or not LNET guarantees these */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
                            piov->kiov_len > ciov->kiov_len) {
                                CERROR("Invalid decrypted length\n");
                                return GSS_S_FAILURE;
                        }
                }

                if (ciov->kiov_len == 0)
                        continue;

                sg_init_table(&ctxt, 1);
                sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
                            ciov->kiov_offset);
                ptxt = ctxt;

                /* In the event the plain text size is not a multiple
                 * of blocksize we decrypt in place and copy the result
                 * after the decryption */
                if (piov->kiov_len % blocksize == 0)
                        sg_assign_page(&ptxt, piov->kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
                                                 ctxt.length);
                if (rc) {
                        CERROR("Decryption failed for page: %d\n", rc);
                        return GSS_S_FAILURE;
                }

                if (piov->kiov_len % blocksize != 0) {
                        memcpy(page_address(piov->kiov_page) +
                               piov->kiov_offset,
                               page_address(ciov->kiov_page) +
                               ciov->kiov_offset,
                               piov->kiov_len);
                }

                cnob += ciov->kiov_len;
                pnob += piov->kiov_len;
        }

        /* if needed, clear out the remaining unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        if (unlikely(cnob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, cnob);
                return GSS_S_FAILURE;
        }

        if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pnob);
                return GSS_S_FAILURE;
        }

        return 0;
}

static
__u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc, rawobj_t *token,
                       int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
        struct sk_wire skw;
        struct sk_hdr skh;
        __u8 local_iv[SK_IV_SIZE];

        LASSERT(skc->sc_session_kb.kb_tfm);

        memset(token->data, 0, token->len);
        if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
                return GSS_S_FAILURE;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(skh);
        memcpy(skw.skw_header.data, &skh, sizeof(skh));

        sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
        if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
                            desc, &skw.skw_cipher, adj_nob))
                return GSS_S_FAILURE;

        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;
        if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
                         1, &skw.skw_cipher, desc->bd_iov_count,
                         GET_ENC_KIOV(desc), &skw.skw_hmac))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}

static
__u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
                         struct ptlrpc_bulk_desc *desc,
                         rawobj_t *token, int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
        struct sk_wire skw;
        struct sk_hdr *skh;
        __u8 local_iv[SK_IV_SIZE];
        int rc;

        LASSERT(skc->sc_session_kb.kb_tfm);

        if (token->len < sizeof(struct sk_hdr) + sht_bytes)
                return GSS_S_DEFECTIVE_TOKEN;

        skw.skw_header.data = token->data;
        skw.skw_header.len = sizeof(struct sk_hdr);
        skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
        skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = cfs_crypto_hash_digestsize(skc->sc_hmac);

        skh = (struct sk_hdr *)skw.skw_header.data;
        rc = sk_verify_header(skh);
        if (rc != GSS_S_COMPLETE)
                return rc;

        rc = sk_verify_bulk_hmac(skc->sc_hmac,
                                 &skc->sc_hmac_key, 1, &skw.skw_cipher,
                                 desc->bd_iov_count, GET_ENC_KIOV(desc),
                                 desc->bd_nob, &skw.skw_hmac);
        if (rc)
                return rc;

        sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
        rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
                             desc, &skw.skw_cipher, adj_nob);
        if (rc)
                return rc;

        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_sk(void *internal_context)
{
        struct sk_ctx *sk_context = internal_context;

        sk_delete_context(sk_context);
}

int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
        return snprintf(buf, bufsize, "sk");
}

static struct gss_api_ops gss_sk_ops = {
        .gss_import_sec_context     = gss_import_sec_context_sk,
        .gss_copy_reverse_context   = gss_copy_reverse_context_sk,
        .gss_inquire_context        = gss_inquire_context_sk,
        .gss_get_mic                = gss_get_mic_sk,
        .gss_verify_mic             = gss_verify_mic_sk,
        .gss_wrap                   = gss_wrap_sk,
        .gss_unwrap                 = gss_unwrap_sk,
        .gss_prep_bulk              = gss_prep_bulk_sk,
        .gss_wrap_bulk              = gss_wrap_bulk_sk,
        .gss_unwrap_bulk            = gss_unwrap_bulk_sk,
        .gss_delete_sec_context     = gss_delete_sec_context_sk,
        .gss_display                = gss_display_sk,
};

static struct subflavor_desc gss_sk_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKN,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_NULL,
                .sf_name        = "skn"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKA,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_AUTH,
                .sf_name        = "ska"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKI,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "ski"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKPI,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "skpi"
        },
};

static struct gss_api_mech gss_sk_mech = {
        /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "sk",
        .gm_oid         = (rawobj_t) {
                .len = 12,
                .data = "\053\006\001\004\001\311\146\215\126\001\000\001",
        },
        .gm_ops         = &gss_sk_ops,
        .gm_sf_num      = 4,
        .gm_sfs         = gss_sk_sfs,
};

int __init init_sk_module(void)
{
        int status;

        status = lgss_mech_register(&gss_sk_mech);
        if (status)
                CERROR("Failed to register sk gss mechanism!\n");

        return status;
}

void cleanup_sk_module(void)
{
        lgss_mech_unregister(&gss_sk_mech);
}