4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (C) 2013, 2015, Trustees of Indiana University
25 * Copyright (c) 2014, Intel Corporation.
27 * Author: Jeremy Filizetti <jfilizet@iu.edu>
28 * Author: Andrew Korty <ajk@iu.edu>
31 #define DEBUG_SUBSYSTEM S_SEC
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/crypto.h>
36 #include <linux/mutex.h>
39 #include <obd_class.h>
40 #include <obd_support.h>
41 #include <lustre/lustre_user.h>
44 #include "gss_crypto.h"
45 #include "gss_internal.h"
49 #define SK_INTERFACE_VERSION 1
57 rawobj_t sc_shared_key;
59 struct gss_keyblock sc_session_kb;
/* Bulk-encryption algorithms supported by the shared-key mechanism,
 * indexed by SK_CRYPT_* id; names are Linux crypto API transform names. */
62 static struct sk_crypt_type sk_crypt_types[] = {
63 [SK_CRYPT_AES256_CTR] = {
64 .sct_name = "ctr(aes256)",
/* Integrity (HMAC) algorithms, indexed by SK_HMAC_* id. */
69 static struct sk_hmac_type sk_hmac_types[] = {
71 .sht_name = "hmac(sha256)",
75 .sht_name = "hmac(sha512)",
/* Round @len up to the next multiple of @blocksize.
 * @blocksize must be a power of two for the mask trick to be valid. */
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	unsigned long mask = blocksize - 1;

	return (len + mask) & ~mask;
}
/* Allocate the session-key block cipher transform (privacy mode only) and
 * program it with the IV carried in the context. Fails if the provided IV
 * length does not match the algorithm's expected IV size. */
85 static int sk_init_keys(struct sk_ctx *skc)
90 rc = gss_keyblock_init(&skc->sc_session_kb,
91 sk_crypt_types[skc->sc_crypt].sct_name, 0);
/* IV length must match the transform's requirement exactly. */
95 ivsize = crypto_blkcipher_ivsize(skc->sc_session_kb.kb_tfm);
96 if (skc->sc_iv.len != ivsize) {
97 CERROR("IV size for algorithm (%d) does not match provided IV "
98 "size: %d\n", ivsize, skc->sc_iv.len);
102 crypto_blkcipher_set_iv(skc->sc_session_kb.kb_tfm,
103 skc->sc_iv.data, skc->sc_iv.len);
/* Deserialize a shared-key context received from userspace into @skc.
 * Wire format (see sk_serialize_kctx()): interface version, HMAC alg id,
 * crypt alg id, expiration delta, shared key, IV, session key. The last two
 * may be empty when privacy mode is not in use. */
108 static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc)
110 char *ptr = inbuf->data;
111 char *end = inbuf->data + inbuf->len;
114 /* see sk_serialize_kctx() for format from userspace side */
/* 1. interface version — must match SK_INTERFACE_VERSION exactly.
 * NOTE(review): several CERROR strings below lack a trailing "\n". */
116 if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
117 CERROR("Failed to read shared key interface version");
120 if (tmp != SK_INTERFACE_VERSION) {
121 CERROR("Invalid shared key interface version: %d\n", tmp);
/* 2. HMAC algorithm id, validated against SK_HMAC_MAX */
126 if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
127 CERROR("Failed to read HMAC algorithm type");
130 if (skc->sc_hmac >= SK_HMAC_MAX) {
131 CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
/* 3. crypt algorithm id, validated against SK_CRYPT_MAX */
136 if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
137 CERROR("Failed to read crypt algorithm type");
140 if (skc->sc_crypt >= SK_CRYPT_MAX) {
141 CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
145 /* 4. expiration time */
146 if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
147 CERROR("Failed to read context expiration time");
/* userspace sends a relative lifetime; convert to absolute expiry */
150 skc->sc_expire = tmp + cfs_time_current_sec();
/* 5. shared key, must exceed SK_MIN_SIZE bytes */
153 if (gss_get_rawobj(&ptr, end, &skc->sc_shared_key)) {
154 CERROR("Failed to read shared key");
157 if (skc->sc_shared_key.len <= SK_MIN_SIZE) {
/* NOTE(review): message has a duplicated word — should read
 * "Shared key must be larger than %d bytes\n". */
158 CERROR("Shared key must key must be larger than %d bytes\n",
163 /* 6. IV, can be empty if not using privacy mode */
164 if (gss_get_rawobj(&ptr, end, &skc->sc_iv)) {
/* NOTE(review): trailing space and missing "\n" in this message */
165 CERROR("Failed to read initialization vector ");
169 /* 7. Session key, can be empty if not using privacy mode */
170 if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
171 CERROR("Failed to read session key");
/* Release everything owned by @skc: the session keyblock (cipher transform
 * and key material), the IV, and the shared key. Does not free @skc itself —
 * callers pair this with OBD_FREE_PTR(). */
178 static void delete_sk_context(struct sk_ctx *skc)
182 gss_keyblock_free(&skc->sc_session_kb);
183 rawobj_free(&skc->sc_iv);
184 rawobj_free(&skc->sc_shared_key);
/* GSS-API import: build an sk context from the serialized blob in @inbuf and
 * attach it to @gss_context. Session keys are initialized only when a session
 * key was supplied (privacy mode). Returns GSS_S_COMPLETE or GSS_S_FAILURE;
 * on failure the partially-built context is torn down. */
188 __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
/* NOTE(review): `privacy` is initialized false and used in the CDEBUG below,
 * but no visible code sets it true — confirm it is set in the privacy path. */
191 bool privacy = false;
193 if (inbuf == NULL || inbuf->data == NULL)
194 return GSS_S_FAILURE;
198 return GSS_S_FAILURE;
200 if (fill_sk_context(inbuf, skc))
203 /* Only privacy mode needs to initialize keys */
204 if (skc->sc_session_kb.kb_key.len > 0) {
206 if (sk_init_keys(skc))
210 gss_context->internal_ctx_id = skc;
211 CDEBUG(D_SEC, "successfully imported sk%s context\n",
212 privacy ? "pi" : "i");
214 return GSS_S_COMPLETE;
/* error path: free any resources fill_sk_context/sk_init_keys acquired */
217 delete_sk_context(skc);
219 return GSS_S_FAILURE;
/* Duplicate an sk context for the reverse (server->client) direction: deep-
 * copies algorithm ids, expiry, shared key, IV, and session keyblock, then
 * re-initializes crypto state when privacy mode is active. On any failure the
 * partially-copied context is destroyed and GSS_S_FAILURE returned. */
223 __u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
224 struct gss_ctx *gss_context_new)
226 struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
227 struct sk_ctx *skc_new;
229 OBD_ALLOC_PTR(skc_new);
231 return GSS_S_FAILURE;
233 skc_new->sc_crypt = skc_old->sc_crypt;
234 skc_new->sc_hmac = skc_old->sc_hmac;
235 skc_new->sc_expire = skc_old->sc_expire;
/* deep copies — the new context owns independent buffers */
236 if (rawobj_dup(&skc_new->sc_shared_key, &skc_old->sc_shared_key))
238 if (rawobj_dup(&skc_new->sc_iv, &skc_old->sc_iv))
240 if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
243 /* Only privacy mode needs to initialize keys */
244 if (skc_new->sc_session_kb.kb_key.len > 0)
245 if (sk_init_keys(skc_new))
248 gss_context_new->internal_ctx_id = skc_new;
249 CDEBUG(D_SEC, "successfully copied reverse sk context\n");
251 return GSS_S_COMPLETE;
/* error path: undo partial copies before freeing the container */
254 delete_sk_context(skc_new);
255 OBD_FREE_PTR(skc_new);
256 return GSS_S_FAILURE;
/* GSS-API inquire: report the context's absolute expiration time
 * (seconds, as computed at import) through @endtime. Always succeeds. */
260 __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
261 unsigned long *endtime)
263 struct sk_ctx *skc = gss_context->internal_ctx_id;
265 *endtime = skc->sc_expire;
266 return GSS_S_COMPLETE;
/* Compute an HMAC with algorithm @alg_name keyed by @key over @msg_count
 * rawobj buffers followed by @iov_count kiovs, writing the digest into
 * @token (which must be at least the digest size — asserted below). */
270 __u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs,
271 int iov_count, lnet_kiov_t *iovs, rawobj_t *token)
273 struct crypto_hash *tfm;
276 tfm = crypto_alloc_hash(alg_name, 0, 0);
278 return GSS_S_FAILURE;
281 LASSERT(token->len >= crypto_hash_digestsize(tfm));
282 if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
/* transform is freed on both success and failure paths */
286 crypto_free_hash(tfm);
/* GSS-API get_mic: thin wrapper producing an HMAC token over the given
 * messages/iovs using the context's configured HMAC and shared key. */
291 __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
298 struct sk_ctx *skc = gss_context->internal_ctx_id;
299 return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name,
300 &skc->sc_shared_key, message_count, messages,
301 iov_count, iovs, token);
/* Verify @token against a freshly computed HMAC over the same inputs.
 * Returns GSS_S_DEFECTIVE_TOKEN when the token is shorter than the digest,
 * GSS_S_FAILURE on mismatch or internal error. */
305 __u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count,
306 rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
309 rawobj_t checksum = RAWOBJ_EMPTY;
310 __u32 rc = GSS_S_FAILURE;
312 checksum.len = sht->sht_bytes;
313 if (token->len < checksum.len) {
314 CDEBUG(D_SEC, "Token received too short, expected %d "
315 "received %d\n", token->len, checksum.len);
316 return GSS_S_DEFECTIVE_TOKEN;
319 OBD_ALLOC_LARGE(checksum.data, checksum.len);
/* recompute the HMAC locally and compare against the received token */
323 if (sk_make_hmac(sht->sht_name, key, message_count, messages,
324 iov_count, iovs, &checksum)) {
325 CDEBUG(D_SEC, "Failed to create checksum to validate\n");
329 if (memcmp(token->data, checksum.data, checksum.len)) {
330 CERROR("checksum mismatch\n");
/* NOTE(review): buffer was obtained via OBD_ALLOC_LARGE but is released
 * with OBD_FREE — sk_verify_bulk_hmac() uses OBD_FREE_LARGE for the same
 * pattern; confirm this should be OBD_FREE_LARGE here as well. */
338 OBD_FREE(checksum.data, checksum.len);
342 /* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
343 * encrypted pages in the bulk descriptor are populated although we only need
344 * to decrypt up to the number of bytes actually specified from the sender
345 * (bd_nob) otherwise the calculated HMAC will be incorrect. */
/* Verify @token over @msgs plus at most @iov_bytes bytes taken from @iovs.
 * Unlike sk_verify_hmac(), the digest is computed incrementally here so the
 * kiov contribution can be capped per-page at the remaining byte budget. */
347 __u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key,
348 int msgcnt, rawobj_t *msgs, int iovcnt,
349 lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
351 rawobj_t checksum = RAWOBJ_EMPTY;
352 struct crypto_hash *tfm;
353 struct hash_desc desc = {
357 struct scatterlist sg[1];
361 int rc = GSS_S_FAILURE;
363 checksum.len = sht->sht_bytes;
364 if (token->len < checksum.len) {
365 CDEBUG(D_SEC, "Token received too short, expected %d "
366 "received %d\n", token->len, checksum.len);
367 return GSS_S_DEFECTIVE_TOKEN;
370 OBD_ALLOC_LARGE(checksum.data, checksum.len);
374 tfm = crypto_alloc_hash(sht->sht_name, 0, 0);
380 LASSERT(token->len >= crypto_hash_digestsize(tfm));
382 rc = crypto_hash_setkey(tfm, key->data, key->len);
386 rc = crypto_hash_init(&desc);
/* hash the rawobj messages first, skipping empty ones */
390 for (i = 0; i < msgcnt; i++) {
391 if (msgs[i].len == 0)
394 rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
398 rc = crypto_hash_update(&desc, sg, msgs[i].len);
400 gss_teardown_sgtable(&sgt);
404 gss_teardown_sgtable(&sgt);
/* then the kiov pages, each capped at the remaining iov_bytes budget */
407 for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
408 if (iovs[i].kiov_len == 0)
411 bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
414 sg_init_table(sg, 1);
415 sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
416 iovs[i].kiov_offset);
417 rc = crypto_hash_update(&desc, sg, bytes);
422 crypto_hash_final(&desc, checksum.data);
424 if (memcmp(token->data, checksum.data, checksum.len)) {
432 crypto_free_hash(tfm);
435 OBD_FREE_LARGE(checksum.data, checksum.len);
/* GSS-API verify_mic: thin wrapper validating @token with the context's
 * configured HMAC type and shared key. */
441 __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
448 struct sk_ctx *skc = gss_context->internal_ctx_id;
449 return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_shared_key,
450 message_count, messages, iov_count, iovs, token);
/* GSS-API wrap (privacy): pad @message to the cipher block size, encrypt it
 * into @token, then append an HMAC computed over the GSS header plus the
 * ciphertext. Token layout on success: [ciphertext][checksum], and
 * token->len is updated to the exact bytes used. */
454 __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
455 rawobj_t *message, int message_buffer_length,
458 struct sk_ctx *skc = gss_context->internal_ctx_id;
459 struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
463 unsigned int blocksize;
/* privacy mode requires an initialized session cipher */
465 LASSERT(skc->sc_session_kb.kb_tfm);
466 blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
468 if (gss_add_padding(message, message_buffer_length, blocksize))
469 return GSS_S_FAILURE;
471 /* Only encrypting the message data */
472 cipher.data = token->data;
473 cipher.len = token->len - sht->sht_bytes;
474 if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, message,
476 return GSS_S_FAILURE;
478 /* Checksum covers the GSS header followed by the encrypted message */
479 msgbufs[0].len = gss_header->len;
480 msgbufs[0].data = gss_header->data;
481 msgbufs[1].len = cipher.len;
482 msgbufs[1].data = cipher.data;
/* checksum is written immediately after the ciphertext in the token */
484 LASSERT(cipher.len + sht->sht_bytes <= token->len);
485 checksum.data = token->data + cipher.len;
486 checksum.len = sht->sht_bytes;
487 if (sk_make_hmac(sht->sht_name, &skc->sc_shared_key, 2, msgbufs, 0,
489 return GSS_S_FAILURE;
491 token->len = cipher.len + checksum.len;
493 return GSS_S_COMPLETE;
/* GSS-API unwrap (privacy): split @token into [ciphertext][checksum], verify
 * the HMAC over the GSS header plus ciphertext, then decrypt the ciphertext
 * into @message. Rejects tokens shorter than the digest or whose ciphertext
 * is not a multiple of the cipher block size. */
497 __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
498 rawobj_t *token, rawobj_t *message)
500 struct sk_ctx *skc = gss_context->internal_ctx_id;
501 struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
505 unsigned int blocksize;
508 LASSERT(skc->sc_session_kb.kb_tfm);
509 blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
511 if (token->len < sht->sht_bytes)
512 return GSS_S_DEFECTIVE_TOKEN;
/* token layout mirrors gss_wrap_sk(): ciphertext then checksum */
514 cipher.data = token->data;
515 cipher.len = token->len - sht->sht_bytes;
516 checksum.data = token->data + cipher.len;
517 checksum.len = sht->sht_bytes;
519 if (cipher.len % blocksize != 0)
520 return GSS_S_DEFECTIVE_TOKEN;
522 /* Checksum covers the GSS header followed by the encrypted message */
523 msgbufs[0].len = gss_header->len;
524 msgbufs[0].data = gss_header->data;
525 msgbufs[1].len = cipher.len;
526 msgbufs[1].data = cipher.data;
/* verify before decrypting — never act on unauthenticated ciphertext */
527 rc = sk_verify_hmac(sht, &skc->sc_shared_key, 2, msgbufs, 0, NULL,
532 message->len = cipher.len;
533 if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, &cipher,
535 return GSS_S_FAILURE;
537 return GSS_S_COMPLETE;
541 __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
542 struct ptlrpc_bulk_desc *desc)
544 struct sk_ctx *skc = gss_context->internal_ctx_id;
548 LASSERT(skc->sc_session_kb.kb_tfm);
549 blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
551 for (i = 0; i < desc->bd_iov_count; i++) {
552 if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
553 CERROR("offset %d not blocksize aligned\n",
554 BD_GET_KIOV(desc, i).kiov_offset);
555 return GSS_S_FAILURE;
558 BD_GET_ENC_KIOV(desc, i).kiov_offset =
559 BD_GET_KIOV(desc, i).kiov_offset;
560 BD_GET_ENC_KIOV(desc, i).kiov_len =
561 sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
564 return GSS_S_COMPLETE;
/* Encrypt every plaintext kiov of @desc into the matching encrypted kiov.
 * Plaintext lengths are rounded up to the cipher block size for the
 * scatterlist, and the encrypted kiov's offset/len are synced from the
 * source scatterlist entry. */
567 static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
568 struct ptlrpc_bulk_desc *desc,
572 struct blkcipher_desc cdesc = {
577 struct scatterlist ptxt;
578 struct scatterlist ctxt;
584 blocksize = crypto_blkcipher_blocksize(tfm);
586 sg_init_table(&ptxt, 1);
587 sg_init_table(&ctxt, 1);
/* one page per iteration: plaintext sg -> ciphertext sg, in place per page */
589 for (i = 0; i < desc->bd_iov_count; i++) {
590 sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
591 sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
593 BD_GET_KIOV(desc, i).kiov_offset);
597 sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
598 ptxt.length, ptxt.offset);
/* record where the ciphertext actually landed for the wire descriptor */
600 BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
601 BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;
603 rc = crypto_blkcipher_encrypt(&cdesc, &ctxt, &ptxt,
606 CERROR("failed to encrypt page: %d\n", rc);
/* Decrypt the encrypted kiovs of @desc back into the plaintext kiovs,
 * tracking consumed ciphertext (cnob) against bd_nob_transferred and
 * produced plaintext (pnob) against bd_nob. Validates block alignment of
 * every encrypted kiov and clamps lengths so neither budget is exceeded. */
617 static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
618 struct ptlrpc_bulk_desc *desc,
622 struct blkcipher_desc cdesc = {
627 struct scatterlist ptxt;
628 struct scatterlist ctxt;
635 sg_init_table(&ptxt, 1);
636 sg_init_table(&ctxt, 1);
638 blocksize = crypto_blkcipher_blocksize(tfm);
639 if (desc->bd_nob_transferred % blocksize != 0) {
640 CERROR("Transfer not a multiple of block size: %d\n",
641 desc->bd_nob_transferred);
642 return GSS_S_DEFECTIVE_TOKEN;
645 for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
647 lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
648 lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);
/* each encrypted segment must be fully block-aligned */
650 if (ciov->kiov_offset % blocksize != 0 ||
651 ciov->kiov_len % blocksize != 0) {
652 CERROR("Invalid bulk descriptor vector\n");
653 return GSS_S_DEFECTIVE_TOKEN;
656 /* Must adjust bytes here because we know the actual sizes after
657 * decryption. Similar to what gss_cli_ctx_unwrap_bulk does for
658 * integrity only mode */
660 /* cipher text must not exceed transferred size */
661 if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
663 desc->bd_nob_transferred - cnob;
665 piov->kiov_len = ciov->kiov_len;
667 /* plain text must not exceed bulk's size */
668 if (ciov->kiov_len + pnob > desc->bd_nob)
669 piov->kiov_len = desc->bd_nob - pnob;
671 /* Taken from krb5_decrypt since it was not verified
672 * whether or not LNET guarantees these */
673 if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
674 piov->kiov_len > ciov->kiov_len) {
675 CERROR("Invalid decrypted length\n");
676 return GSS_S_FAILURE;
680 if (ciov->kiov_len == 0)
683 sg_init_table(&ctxt, 1);
684 sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
688 /* In the event the plain text size is not a multiple
689 * of blocksize we decrypt in place and copy the result
690 * after the decryption */
691 if (piov->kiov_len % blocksize == 0)
692 sg_assign_page(&ptxt, piov->kiov_page);
694 rc = crypto_blkcipher_decrypt(&cdesc, &ptxt, &ctxt,
697 CERROR("Decryption failed for page: %d\n", rc);
698 return GSS_S_FAILURE;
/* partial-block case: copy the in-place-decrypted bytes over */
701 if (piov->kiov_len % blocksize != 0) {
702 memcpy(page_address(piov->kiov_page) +
704 page_address(ciov->kiov_page) +
709 cnob += ciov->kiov_len;
710 pnob += piov->kiov_len;
713 /* if needed, clear up the rest unused iovs */
715 while (i < desc->bd_iov_count)
716 BD_GET_KIOV(desc, i++).kiov_len = 0;
/* sanity: all transferred ciphertext must have been consumed */
718 if (unlikely(cnob != desc->bd_nob_transferred)) {
719 CERROR("%d cipher text transferred but only %d decrypted\n",
720 desc->bd_nob_transferred, cnob);
721 return GSS_S_FAILURE;
/* unless adjusting nob, plaintext produced must match bd_nob exactly */
724 if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
725 CERROR("%d plain text expected but only %d received\n",
727 return GSS_S_FAILURE;
/* GSS-API wrap_bulk (privacy): encrypt all bulk pages of @desc, then emit an
 * HMAC over the cipher header area plus the encrypted kiovs into the tail of
 * @token. Token layout: [cipher area][checksum]. */
734 __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
735 struct ptlrpc_bulk_desc *desc, rawobj_t *token,
738 struct sk_ctx *skc = gss_context->internal_ctx_id;
739 struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
740 rawobj_t cipher = RAWOBJ_EMPTY;
741 rawobj_t checksum = RAWOBJ_EMPTY;
/* reserve the token tail for the checksum; zero the whole token first */
743 cipher.data = token->data;
744 cipher.len = token->len - sht->sht_bytes;
745 memset(token->data, 0, token->len);
747 if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob))
748 return GSS_S_FAILURE;
750 checksum.data = token->data + cipher.len;
751 checksum.len = sht->sht_bytes;
/* HMAC covers the cipher rawobj and every encrypted kiov */
753 if (sk_make_hmac(sht->sht_name, &skc->sc_shared_key, 1, &cipher,
754 desc->bd_iov_count, GET_ENC_KIOV(desc), &checksum))
755 return GSS_S_FAILURE;
757 return GSS_S_COMPLETE;
/* GSS-API unwrap_bulk (privacy): verify the bulk HMAC (capped at bd_nob
 * bytes of encrypted kiov data — see sk_verify_bulk_hmac()) and then decrypt
 * the bulk pages in place. Verification precedes decryption. */
761 __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
762 struct ptlrpc_bulk_desc *desc,
763 rawobj_t *token, int adj_nob)
765 struct sk_ctx *skc = gss_context->internal_ctx_id;
766 struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
767 rawobj_t cipher = RAWOBJ_EMPTY;
768 rawobj_t checksum = RAWOBJ_EMPTY;
/* token layout mirrors gss_wrap_bulk_sk(): cipher area then checksum */
771 cipher.data = token->data;
772 cipher.len = token->len - sht->sht_bytes;
773 checksum.data = token->data + cipher.len;
774 checksum.len = sht->sht_bytes;
776 rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac],
777 &skc->sc_shared_key, 1, &cipher,
778 desc->bd_iov_count, GET_ENC_KIOV(desc),
779 desc->bd_nob, &checksum);
783 rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob);
787 return GSS_S_COMPLETE;
/* GSS-API delete_sec_context: tear down the sk context's owned resources
 * and free the context structure itself. */
791 void gss_delete_sec_context_sk(void *internal_context)
793 struct sk_ctx *sk_context = internal_context;
794 delete_sk_context(sk_context);
795 OBD_FREE_PTR(sk_context);
/* GSS-API display: write the mechanism's short name ("sk") into @buf,
 * returning the snprintf() result. */
798 int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
800 return snprintf(buf, bufsize, "sk");
/* GSS-API operations vector for the shared-key mechanism. */
803 static struct gss_api_ops gss_sk_ops = {
804 .gss_import_sec_context = gss_import_sec_context_sk,
805 .gss_copy_reverse_context = gss_copy_reverse_context_sk,
806 .gss_inquire_context = gss_inquire_context_sk,
807 .gss_get_mic = gss_get_mic_sk,
808 .gss_verify_mic = gss_verify_mic_sk,
809 .gss_wrap = gss_wrap_sk,
810 .gss_unwrap = gss_unwrap_sk,
811 .gss_prep_bulk = gss_prep_bulk_sk,
812 .gss_wrap_bulk = gss_wrap_bulk_sk,
813 .gss_unwrap_bulk = gss_unwrap_bulk_sk,
814 .gss_delete_sec_context = gss_delete_sec_context_sk,
815 .gss_display = gss_display_sk,
/* sptlrpc subflavors offered by sk: null, auth (integrity of headers),
 * integrity, and privacy service levels. */
818 static struct subflavor_desc gss_sk_sfs[] = {
820 .sf_subflavor = SPTLRPC_SUBFLVR_SKN,
822 .sf_service = SPTLRPC_SVC_NULL,
826 .sf_subflavor = SPTLRPC_SUBFLVR_SKA,
828 .sf_service = SPTLRPC_SVC_AUTH,
832 .sf_subflavor = SPTLRPC_SUBFLVR_SKI,
834 .sf_service = SPTLRPC_SVC_INTG,
838 .sf_subflavor = SPTLRPC_SUBFLVR_SKPI,
840 .sf_service = SPTLRPC_SVC_PRIV,
/*
 * Mechanism descriptor registered with the GSS framework;
 * currently we leave module owner NULL
 */
848 static struct gss_api_mech gss_sk_mech = {
849 .gm_owner = NULL, /*THIS_MODULE, */
/* mechanism OID in DER byte form */
851 .gm_oid = (rawobj_t) {
853 "\053\006\001\004\001\311\146\215\126\001\000\001",
855 .gm_ops = &gss_sk_ops,
857 .gm_sfs = gss_sk_sfs,
/* Module init: register the sk mechanism with the GSS framework;
 * logs and propagates a registration failure. */
860 int __init init_sk_module(void)
864 status = lgss_mech_register(&gss_sk_mech);
866 CERROR("Failed to register sk gss mechanism!\n");
/* Module cleanup: unregister the sk mechanism. */
871 void cleanup_sk_module(void)
873 lgss_mech_unregister(&gss_sk_mech);