/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (C) 2013, 2015, Trustees of Indiana University
 *
 * Copyright (c) 2014, Intel Corporation.
 *
 * Author: Jeremy Filizetti <jfilizet@iu.edu>
 * Author: Andrew Korty <ajk@iu.edu>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_user.h>

#include "gss_err.h"
#include "gss_crypto.h"
#include "gss_internal.h"
#include "gss_api.h"
#include "gss_asn1.h"

#define SK_INTERFACE_VERSION 1
#define SK_MIN_SIZE 8

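/*
 * Kernel-side shared key security context.  Fields are filled in from
 * the buffer serialized by userspace (see sk_serialize_kctx() there and
 * fill_sk_context() below for the layout).
 */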
struct sk_ctx {
        __u32                   sc_version;
        __u16                   sc_hmac;
        __u16                   sc_crypt;
        __u32                   sc_expire;
        rawobj_t                sc_shared_key;
        rawobj_t                sc_iv;
        struct gss_keyblock     sc_session_kb;
};

static struct sk_crypt_type sk_crypt_types[] = {
        [SK_CRYPT_AES256_CTR] = {
                /* The kernel crypto API selects AES-256 by key length,
                 * so the algorithm name is "ctr(aes)" with a 32-byte
                 * key; "ctr(aes256)" is not a registered name. */
                .sct_name = "ctr(aes)",
                .sct_bytes = 32,
        },
};

static struct sk_hmac_type sk_hmac_types[] = {
        [SK_HMAC_SHA256] = {
                .sht_name = "hmac(sha256)",
                .sht_bytes = 32,
        },
        [SK_HMAC_SHA512] = {
                .sht_name = "hmac(sha512)",
                .sht_bytes = 64,
        },
};

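/* Round len up to the next multiple of blocksize (a power of two). */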
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
        return (len + blocksize - 1) & (~(blocksize - 1));
}

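/*
 * Allocate the session keyblock cipher and program the context IV into
 * it.  Only called for privacy (skpi) contexts carrying a session key.
 */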
static int sk_init_keys(struct sk_ctx *skc)
{
        int rc;
        unsigned int ivsize;

        rc = gss_keyblock_init(&skc->sc_session_kb,
                               sk_crypt_types[skc->sc_crypt].sct_name, 0);
        if (rc)
                return rc;

        ivsize = crypto_blkcipher_ivsize(skc->sc_session_kb.kb_tfm);
        if (skc->sc_iv.len != ivsize) {
                CERROR("IV size for algorithm (%d) does not match provided IV size: %d\n",
                       ivsize, skc->sc_iv.len);
                return -EINVAL;
        }

        crypto_blkcipher_set_iv(skc->sc_session_kb.kb_tfm,
                                skc->sc_iv.data, skc->sc_iv.len);

        return 0;
}

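/*
 * Deserialize a shared key context from the userspace buffer.  Fields
 * are read in the fixed order documented below; the IV and session key
 * may be empty for integrity-only (ski) contexts.
 */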
static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
        char *ptr = inbuf->data;
        char *end = inbuf->data + inbuf->len;
        __u32 tmp;

        /* see sk_serialize_kctx() for format from userspace side */
        /* 1. Version */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read shared key interface version\n");
                return -1;
        }
        if (tmp != SK_INTERFACE_VERSION) {
                CERROR("Invalid shared key interface version: %d\n", tmp);
                return -1;
        }

        /* 2. HMAC type */
        if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
                CERROR("Failed to read HMAC algorithm type\n");
                return -1;
        }
        if (skc->sc_hmac >= SK_HMAC_MAX) {
                CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
                return -1;
        }

        /* 3. crypt type */
        if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
                CERROR("Failed to read crypt algorithm type\n");
                return -1;
        }
        if (skc->sc_crypt >= SK_CRYPT_MAX) {
                CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
                return -1;
        }

        /* 4. expiration time */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read context expiration time\n");
                return -1;
        }
        skc->sc_expire = tmp + cfs_time_current_sec();

        /* 5. Shared key */
        if (gss_get_rawobj(&ptr, end, &skc->sc_shared_key)) {
                CERROR("Failed to read shared key\n");
                return -1;
        }
        if (skc->sc_shared_key.len <= SK_MIN_SIZE) {
                CERROR("Shared key must be larger than %d bytes\n",
                       SK_MIN_SIZE);
                return -1;
        }

        /* 6. IV, can be empty if not using privacy mode */
        if (gss_get_rawobj(&ptr, end, &skc->sc_iv)) {
                CERROR("Failed to read initialization vector\n");
                return -1;
        }

        /* 7. Session key, can be empty if not using privacy mode */
        if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
                CERROR("Failed to read session key\n");
                return -1;
        }

        return 0;
}

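/* Free everything owned by an sk context; safe to call on NULL. */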
static void delete_sk_context(struct sk_ctx *skc)
{
        if (!skc)
                return;
        gss_keyblock_free(&skc->sc_session_kb);
        rawobj_free(&skc->sc_iv);
        rawobj_free(&skc->sc_shared_key);
}

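/*
 * GSS entry point: build a kernel sk context from the serialized
 * context passed down from userspace.  Privacy (skpi) contexts carry a
 * session key and have their cipher initialized here as well.
 */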
static
__u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
{
        struct sk_ctx *skc;
        bool privacy = false;

        if (inbuf == NULL || inbuf->data == NULL)
                return GSS_S_FAILURE;

        OBD_ALLOC_PTR(skc);
        if (!skc)
                return GSS_S_FAILURE;

        if (fill_sk_context(inbuf, skc))
                goto out_error;

        /* Only privacy mode needs to initialize keys */
        if (skc->sc_session_kb.kb_key.len > 0) {
                privacy = true;
                if (sk_init_keys(skc))
                        goto out_error;
        }

        gss_context->internal_ctx_id = skc;
        CDEBUG(D_SEC, "successfully imported sk%s context\n",
               privacy ? "pi" : "i");

        return GSS_S_COMPLETE;

out_error:
        delete_sk_context(skc);
        OBD_FREE_PTR(skc);
        return GSS_S_FAILURE;
}

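/*
 * Duplicate an existing context for the reverse direction.  All key
 * material is deep-copied so the two contexts can be torn down
 * independently.
 */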
static
__u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
                                  struct gss_ctx *gss_context_new)
{
        struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
        struct sk_ctx *skc_new;

        OBD_ALLOC_PTR(skc_new);
        if (!skc_new)
                return GSS_S_FAILURE;

        skc_new->sc_crypt = skc_old->sc_crypt;
        skc_new->sc_hmac = skc_old->sc_hmac;
        skc_new->sc_expire = skc_old->sc_expire;
        if (rawobj_dup(&skc_new->sc_shared_key, &skc_old->sc_shared_key))
                goto out_error;
        if (rawobj_dup(&skc_new->sc_iv, &skc_old->sc_iv))
                goto out_error;
        if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
                goto out_error;

        /* Only privacy mode needs to initialize keys */
        if (skc_new->sc_session_kb.kb_key.len > 0) {
                if (sk_init_keys(skc_new))
                        goto out_error;
        }

        gss_context_new->internal_ctx_id = skc_new;
        CDEBUG(D_SEC, "successfully copied reverse sk context\n");

        return GSS_S_COMPLETE;

out_error:
        delete_sk_context(skc_new);
        OBD_FREE_PTR(skc_new);
        return GSS_S_FAILURE;
}

static
__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
                             unsigned long *endtime)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        *endtime = skc->sc_expire;
        return GSS_S_COMPLETE;
}

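/*
 * Compute an HMAC over the given message buffers and kiovs with the
 * named hash algorithm, writing the digest into token.  token must be
 * at least as large as the digest.
 */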
static
__u32 sk_make_checksum(char *alg_name, rawobj_t *key,
                       int msg_count, rawobj_t *msgs,
                       int iov_count, lnet_kiov_t *iovs,
                       rawobj_t *token)
{
        struct crypto_hash *tfm;
        int rc;

        tfm = crypto_alloc_hash(alg_name, 0, 0);
        /* crypto_alloc_hash() returns an ERR_PTR on failure, not NULL */
        if (IS_ERR(tfm))
                return GSS_S_FAILURE;

        rc = GSS_S_FAILURE;
        LASSERT(token->len >= crypto_hash_digestsize(tfm));
        if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
                             token))
                rc = GSS_S_COMPLETE;

        crypto_free_hash(tfm);
        return rc;
}

static
__u32 gss_get_mic_sk(struct gss_ctx *gss_context,
                     int message_count,
                     rawobj_t *messages,
                     int iov_count,
                     lnet_kiov_t *iovs,
                     rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_make_checksum(sk_hmac_types[skc->sc_hmac].sht_name,
                                &skc->sc_shared_key, message_count, messages,
                                iov_count, iovs, token);
}

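/*
 * Recompute the HMAC for the given buffers and compare it with the
 * checksum carried in token.  Returns GSS_S_BAD_SIG on mismatch.
 */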
static
__u32 sk_verify_checksum(struct sk_hmac_type *sht,
                         rawobj_t *key,
                         int message_count,
                         rawobj_t *messages,
                         int iov_count,
                         lnet_kiov_t *iovs,
                         rawobj_t *token)
{
        rawobj_t checksum = RAWOBJ_EMPTY;
        __u32 rc = GSS_S_FAILURE;

        checksum.len = sht->sht_bytes;
        if (token->len < checksum.len) {
                CDEBUG(D_SEC, "Token received too short, expected %d received %d\n",
                       checksum.len, token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        OBD_ALLOC_LARGE(checksum.data, checksum.len);
        if (!checksum.data)
                return rc;

        if (sk_make_checksum(sht->sht_name, key, message_count,
                             messages, iov_count, iovs, &checksum)) {
                CDEBUG(D_SEC, "Failed to create checksum to validate\n");
                goto cleanup;
        }

        if (memcmp(token->data, checksum.data, checksum.len)) {
                CERROR("checksum mismatch\n");
                rc = GSS_S_BAD_SIG;
                goto cleanup;
        }

        rc = GSS_S_COMPLETE;

cleanup:
        OBD_FREE(checksum.data, checksum.len);
        return rc;
}

static
__u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
                        int message_count,
                        rawobj_t *messages,
                        int iov_count,
                        lnet_kiov_t *iovs,
                        rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_verify_checksum(&sk_hmac_types[skc->sc_hmac],
                                  &skc->sc_shared_key, message_count, messages,
                                  iov_count, iovs, token);
}

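/*
 * Wrap (encrypt and integrity-protect) an RPC message.  The resulting
 * token is the encrypted message followed by an HMAC covering the GSS
 * header plus the ciphertext.
 */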
static
__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                  rawobj_t *message, int message_buffer_length,
                  rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t msgbufs[2];
        rawobj_t cipher;
        rawobj_t checksum;
        unsigned int blocksize;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        if (gss_add_padding(message, message_buffer_length, blocksize))
                return GSS_S_FAILURE;

        /* Only encrypting the message data */
        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, message,
                              &cipher, 1))
                return GSS_S_FAILURE;

        /* Checksum covers the GSS header followed by the encrypted message */
        msgbufs[0].len = gss_header->len;
        msgbufs[0].data = gss_header->data;
        msgbufs[1].len = cipher.len;
        msgbufs[1].data = cipher.data;

        LASSERT(cipher.len + sht->sht_bytes <= token->len);
        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;
        if (sk_make_checksum(sht->sht_name, &skc->sc_shared_key, 2, msgbufs, 0,
                             NULL, &checksum))
                return GSS_S_FAILURE;

        token->len = cipher.len + checksum.len;

        return GSS_S_COMPLETE;
}

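/*
 * Unwrap a token produced by gss_wrap_sk(): verify the trailing HMAC
 * over the GSS header and ciphertext, then decrypt the message.
 */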
static
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                    rawobj_t *token, rawobj_t *message)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t msgbufs[2];
        rawobj_t cipher;
        rawobj_t checksum;
        unsigned int blocksize;
        int rc;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        if (token->len < sht->sht_bytes)
                return GSS_S_DEFECTIVE_TOKEN;

        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;
        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;

        if (cipher.len % blocksize != 0)
                return GSS_S_DEFECTIVE_TOKEN;

        /* Checksum covers the GSS header followed by the encrypted message */
        msgbufs[0].len = gss_header->len;
        msgbufs[0].data = gss_header->data;
        msgbufs[1].len = cipher.len;
        msgbufs[1].data = cipher.data;
        rc = sk_verify_checksum(sht, &skc->sc_shared_key, 2, msgbufs, 0, NULL,
                                &checksum);
        if (rc)
                return rc;

        message->len = cipher.len;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, &cipher,
                              message, 0))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}

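/*
 * Prepare a bulk descriptor for encryption: mirror each plaintext kiov
 * into the encrypted kiov array, rounding lengths up to the cipher
 * block size and requiring block-aligned offsets.
 */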
static
__u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        int blocksize;
        int i;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                /* blocksize is a power of two, so the alignment test must
                 * mask with (blocksize - 1), not blocksize itself */
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("offset %d not blocksize aligned\n",
                               BD_GET_KIOV(desc, i).kiov_offset);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
        }

        return GSS_S_COMPLETE;
}

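/*
 * Encrypt each page of the bulk descriptor into its matching encrypted
 * kiov, padding each fragment out to a whole number of cipher blocks.
 * With adj_nob set, bd_nob is updated to the padded total.
 */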
static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *cipher,
                             int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = NULL,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int nob = 0;

        blocksize = crypto_blkcipher_blocksize(tfm);

        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
                            sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
                                          blocksize),
                            BD_GET_KIOV(desc, i).kiov_offset);
                if (adj_nob)
                        nob += ptxt.length;

                sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            ptxt.length, ptxt.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

                rc = crypto_blkcipher_encrypt(&cdesc, &ctxt, &ptxt,
                                              ptxt.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        /* this function returns a GSS status, not an errno */
                        return GSS_S_FAILURE;
                }
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}

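/*
 * Decrypt the encrypted kiovs of a bulk descriptor back into the
 * plaintext pages.  With adj_nob set, kiov lengths are clamped to the
 * bytes actually transferred, since the true sizes are only known
 * after decryption.
 */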
static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *cipher,
                             int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = NULL,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int pnob = 0;
        int cnob = 0;

        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        blocksize = crypto_blkcipher_blocksize(tfm);
        if (desc->bd_nob_transferred % blocksize != 0) {
                CERROR("Transfer not a multiple of block size: %d\n",
                       desc->bd_nob_transferred);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        for (i = 0; i < desc->bd_iov_count; i++) {
                lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
                lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

                if (piov->kiov_offset % blocksize != 0 ||
                    piov->kiov_len % blocksize != 0) {
                        CERROR("Invalid bulk descriptor vector\n");
                        return GSS_S_DEFECTIVE_TOKEN;
                }

                /* Must adjust bytes here because the actual sizes are only
                 * known after decryption.  Similar to what
                 * gss_cli_ctx_unwrap_bulk() does for integrity-only mode */
                if (adj_nob) {
                        /* cipher text must not exceed transferred size */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
                                ciov->kiov_len =
                                        desc->bd_nob_transferred - cnob;

                        piov->kiov_len = ciov->kiov_len;

                        /* plain text must not exceed bulk's size */
                        if (ciov->kiov_len + pnob > desc->bd_nob)
                                piov->kiov_len = desc->bd_nob - pnob;
                } else {
                        /* Taken from krb5_decrypt since it was not verified
                         * whether or not LNET guarantees these */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
                            piov->kiov_len > ciov->kiov_len) {
                                CERROR("Invalid decrypted length\n");
                                return GSS_S_FAILURE;
                        }
                }

                if (ciov->kiov_len == 0)
                        continue;

                sg_init_table(&ctxt, 1);
                sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
                            ciov->kiov_offset);
                ptxt = ctxt;

                /* In the event the plain text size is not a multiple
                 * of blocksize we decrypt in place and copy the result
                 * after the decryption */
                if (piov->kiov_len % blocksize == 0)
                        sg_assign_page(&ptxt, piov->kiov_page);

                rc = crypto_blkcipher_decrypt(&cdesc, &ptxt, &ctxt,
                                              ctxt.length);
                if (rc) {
                        CERROR("Decryption failed for page: %d\n", rc);
                        return GSS_S_FAILURE;
                }

                if (piov->kiov_len % blocksize != 0) {
                        memcpy(page_address(piov->kiov_page) +
                               piov->kiov_offset,
                               page_address(ciov->kiov_page) +
                               ciov->kiov_offset,
                               piov->kiov_len);
                }

                cnob += ciov->kiov_len;
                pnob += piov->kiov_len;
        }

        /* if needed, clear out the remaining unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        if (unlikely(cnob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, cnob);
                return GSS_S_FAILURE;
        }

        if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pnob);
                return GSS_S_FAILURE;
        }

        return 0;
}

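/*
 * Wrap a bulk transfer: encrypt the descriptor pages with the session
 * key, then append an HMAC over the ciphertext to the token.
 */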
static
__u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc, rawobj_t *token,
                       int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t cipher = RAWOBJ_EMPTY;
        rawobj_t checksum = RAWOBJ_EMPTY;

        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;

        if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob))
                return GSS_S_FAILURE;

        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;

        if (sk_make_checksum(sht->sht_name, &skc->sc_shared_key, 1, &cipher, 0,
                             NULL, &checksum))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}

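/*
 * Unwrap a bulk transfer: verify the token's HMAC over the ciphertext
 * before decrypting the descriptor pages.
 */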
static
__u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
                         struct ptlrpc_bulk_desc *desc,
                         rawobj_t *token, int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t cipher = RAWOBJ_EMPTY;
        rawobj_t checksum = RAWOBJ_EMPTY;
        int rc;

        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;
        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;

        rc = sk_verify_checksum(sht, &skc->sc_shared_key, 1, &cipher, 0, NULL,
                                &checksum);
        if (rc)
                return rc;

        rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob);
        if (rc)
                return rc;

        return GSS_S_COMPLETE;
}

static
void gss_delete_sec_context_sk(void *internal_context)
{
        struct sk_ctx *sk_context = internal_context;

        delete_sk_context(sk_context);
        OBD_FREE_PTR(sk_context);
}

static int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
        return snprintf(buf, bufsize, "sk");
}

static struct gss_api_ops gss_sk_ops = {
        .gss_import_sec_context     = gss_import_sec_context_sk,
        .gss_copy_reverse_context   = gss_copy_reverse_context_sk,
        .gss_inquire_context        = gss_inquire_context_sk,
        .gss_get_mic                = gss_get_mic_sk,
        .gss_verify_mic             = gss_verify_mic_sk,
        .gss_wrap                   = gss_wrap_sk,
        .gss_unwrap                 = gss_unwrap_sk,
        .gss_prep_bulk              = gss_prep_bulk_sk,
        .gss_wrap_bulk              = gss_wrap_bulk_sk,
        .gss_unwrap_bulk            = gss_unwrap_bulk_sk,
        .gss_delete_sec_context     = gss_delete_sec_context_sk,
        .gss_display                = gss_display_sk,
};

static struct subflavor_desc gss_sk_sfs[] = {
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKI,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
                .sf_name        = "ski"
        },
        {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKPI,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_PRIV,
                .sf_name        = "skpi"
        },
};

/*
 * currently we leave module owner NULL
 */
static struct gss_api_mech gss_sk_mech = {
        .gm_owner       = NULL, /* THIS_MODULE, */
        .gm_name        = "sk",
        .gm_oid         = (rawobj_t) {
                12,
                "\053\006\001\004\001\311\146\215\126\001\000\001",
        },
        .gm_ops         = &gss_sk_ops,
        .gm_sf_num      = 2,
        .gm_sfs         = gss_sk_sfs,
};

int __init init_sk_module(void)
{
        int status;

        status = lgss_mech_register(&gss_sk_mech);
        if (status)
                CERROR("Failed to register sk gss mechanism!\n");

        return status;
}

void cleanup_sk_module(void)
{
        lgss_mech_unregister(&gss_sk_mech);
}