/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (C) 2013, 2015, Trustees of Indiana University
 *
 * Copyright (c) 2014, 2016, Intel Corporation.
 *
 * Author: Jeremy Filizetti <jfilizet@iu.edu>
 * Author: Andrew Korty <ajk@iu.edu>
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#include <crypto/ctr.h>

#include <obd_class.h>
#include <obd_support.h>

#include "gss_crypto.h"
#include "gss_internal.h"

#define SK_INTERFACE_VERSION 1
#define SK_MSG_VERSION 1
/* Starting number for reverse contexts. It is critical to security
 * that reverse contexts use a different range of numbers than regular
 * contexts because they are using the same key. Therefore the IV/nonce
 * combination must be unique for them. To accomplish this, reverse contexts
 * use the negative range of a 64-bit number and regular contexts use the
 * positive range. If the same IV/nonce combination were reused it would leak
 * information about the plaintext. */
#define SK_IV_REV_START (1ULL << 63)
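
/*
 * Illustrative sketch (not from the original source): with the split above,
 * a regular context hands out IVs counting up from 0 while its reverse
 * context counts up from SK_IV_REV_START (0x8000000000000000ULL), so two
 * contexts sharing one key never emit the same 64-bit IV value before the
 * wraparound check below fires.  Assuming hypothetical forward and reverse
 * counters:
 *
 *	atomic64_t fwd_iv, rev_iv;
 *
 *	atomic64_set(&fwd_iv, 0);
 *	atomic64_set(&rev_iv, SK_IV_REV_START);
 *	atomic64_inc_return(&fwd_iv);	first regular IV == 1
 *	atomic64_inc_return(&rev_iv);	first reverse IV == SK_IV_REV_START + 1
 */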
	enum cfs_crypto_crypt_alg sc_crypt;
	enum cfs_crypto_hash_alg sc_hmac;
	struct gss_keyblock sc_session_kb;

} __attribute__((packed));
/* The format of SK wire data is similar to that of the RFC 3686 ESP payload
 * (section 3), except that instead of just an IV there is a struct sk_hdr.
 * ---------------------------------------------------------------------
 * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
 * --------------------------------------------------------------------- */
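
/*
 * Worked example (illustrative numbers, not from the original source): with
 * SHA-256 integrity (32-byte digest) and 48 bytes of padded ciphertext, a
 * wrap token is laid out as
 *
 *	bytes  0..15  struct sk_hdr (skh_version and skh_iv, two __u64 fields)
 *	bytes 16..63  ciphertext (48 bytes)
 *	bytes 64..95  HMAC (32 bytes)
 *
 *	token->len == sizeof(struct sk_hdr) + cipher_len + hmac_len == 96
 */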
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	return (len + blocksize - 1) & (~(blocksize - 1));
}
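
/*
 * Usage sketch (illustrative): sk_block_mask() rounds a length up to the
 * next multiple of a power-of-two block size, e.g.
 *
 *	sk_block_mask(10, 16) == 16
 *	sk_block_mask(32, 16) == 32
 *	sk_block_mask(33, 16) == 48
 */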
static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
{
	__u64 tmp_iv;

	skh->skh_version = be64_to_cpu(SK_MSG_VERSION);

	/* Always use inc_return so we never send the initial counter values,
	 * which are reserved for detecting counter wraparound */
	tmp_iv = atomic64_inc_return(&skc->sc_iv);
	skh->skh_iv = be64_to_cpu(tmp_iv);
	if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
		CERROR("Counter looped, connection must be reset to avoid plaintext information\n");
		return GSS_S_FAILURE;
	}

	return GSS_S_COMPLETE;
}
static int sk_verify_header(struct sk_hdr *skh)
{
	if (cpu_to_be64(skh->skh_version) != SK_MSG_VERSION)
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}
void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
{
	__u32 ctr = cpu_to_be32(1);

	memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
	iv += CTR_RFC3686_IV_SIZE;
	memcpy(iv, &ctr, sizeof(ctr));
}
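
/*
 * Resulting counter-block layout (sizes taken from <crypto/ctr.h>, where
 * CTR_RFC3686_NONCE_SIZE is 4 and CTR_RFC3686_IV_SIZE is 8):
 *
 *	iv[ 0.. 3]  per-context nonce (sc_host_random or sc_peer_random)
 *	iv[ 4..11]  per-message IV (skh_iv)
 *	iv[12..15]  big-endian block counter, initialized to 1
 *
 * This matches the counter block described in RFC 3686, section 4.
 */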
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
	char *ptr = inbuf->data;
	char *end = inbuf->data + inbuf->len;
	char sk_hmac[CRYPTO_MAX_ALG_NAME];
	char sk_crypt[CRYPTO_MAX_ALG_NAME];

	/* see sk_serialize_kctx() for the format used on the userspace side */
	if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
		CERROR("Failed to read shared key interface version\n");

	if (tmp != SK_INTERFACE_VERSION) {
		CERROR("Invalid shared key interface version: %d\n", tmp);

	if (gss_get_bytes(&ptr, end, &sk_hmac, sizeof(sk_hmac))) {
		CERROR("Failed to read HMAC algorithm type\n");

	skc->sc_hmac = cfs_crypto_hash_alg(sk_hmac);
	if (skc->sc_hmac != CFS_HASH_ALG_NULL &&
	    skc->sc_hmac != CFS_HASH_ALG_SHA256 &&
	    skc->sc_hmac != CFS_HASH_ALG_SHA512) {
		CERROR("Invalid hmac type: %s\n", sk_hmac);

	if (gss_get_bytes(&ptr, end, &sk_crypt, sizeof(sk_crypt))) {
		CERROR("Failed to read crypt algorithm type\n");

	skc->sc_crypt = cfs_crypto_crypt_alg(sk_crypt);
	if (skc->sc_crypt == CFS_CRYPT_ALG_UNKNOWN) {
		CERROR("Invalid crypt type: %s\n", sk_crypt);

	/* 4. expiration time */
	if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
		CERROR("Failed to read context expiration time\n");

	skc->sc_expire = tmp + ktime_get_real_seconds();

	/* 5. host random is used as nonce for encryption */
	if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
			  sizeof(skc->sc_host_random))) {
		CERROR("Failed to read host random\n");

	/* 6. peer random is used as nonce for decryption */
	if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
			  sizeof(skc->sc_peer_random))) {
		CERROR("Failed to read peer random\n");

	if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
		CERROR("Failed to read HMAC key\n");

	if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
		CERROR("HMAC key must be larger than %d bytes\n",

	/* 8. Session key, can be empty if not using privacy mode */
	if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
		CERROR("Failed to read session key\n");
static void sk_delete_context(struct sk_ctx *skc)
	rawobj_free(&skc->sc_hmac_key);
	gss_keyblock_free(&skc->sc_session_kb);
__u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
	bool privacy = false;

	if (inbuf == NULL || inbuf->data == NULL)
		return GSS_S_FAILURE;

		return GSS_S_FAILURE;

	atomic64_set(&skc->sc_iv, 0);

	if (sk_fill_context(inbuf, skc))

	/* Only privacy mode needs to initialize keys */
	if (skc->sc_session_kb.kb_key.len > 0) {
		if (gss_keyblock_init(&skc->sc_session_kb,
				      cfs_crypto_crypt_name(skc->sc_crypt), 0))

	gss_context->internal_ctx_id = skc;
	CDEBUG(D_SEC, "successfully imported sk%s context\n",
	       privacy ? " (with privacy)" : "");

	return GSS_S_COMPLETE;

	sk_delete_context(skc);
	return GSS_S_FAILURE;
__u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
				  struct gss_ctx *gss_context_new)
	struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
	struct sk_ctx *skc_new;

	OBD_ALLOC_PTR(skc_new);
		return GSS_S_FAILURE;

	skc_new->sc_hmac = skc_old->sc_hmac;
	skc_new->sc_crypt = skc_old->sc_crypt;
	skc_new->sc_expire = skc_old->sc_expire;
	skc_new->sc_host_random = skc_old->sc_host_random;
	skc_new->sc_peer_random = skc_old->sc_peer_random;

	atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);

	if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))
	if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))

	/* Only privacy mode needs to initialize keys */
	if (skc_new->sc_session_kb.kb_key.len > 0)
		if (gss_keyblock_init(&skc_new->sc_session_kb,
				      cfs_crypto_crypt_name(skc_new->sc_crypt),

	gss_context_new->internal_ctx_id = skc_new;
	CDEBUG(D_SEC, "successfully copied reverse sk context\n");

	return GSS_S_COMPLETE;

	sk_delete_context(skc_new);
	return GSS_S_FAILURE;
__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	*endtime = skc->sc_expire;
	return GSS_S_COMPLETE;
u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
		 rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
		 rawobj_t *token, digest_hash hash_func)
	struct ahash_request *req;

	req = cfs_crypto_hash_init(algo, key->data, key->len);
		goto out_init_failed;

		rc2 = hash_func(req, NULL, msg_count, msgs, iov_count,
		rc2 = gss_digest_hash(req, NULL, msg_count, msgs, iov_count,

	rc = cfs_crypto_hash_final(req, token->data, &token->len);

	return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
__u32 gss_get_mic_sk(struct gss_ctx *gss_context,
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	return sk_make_hmac(skc->sc_hmac,
			    &skc->sc_hmac_key, message_count, messages,
			    iov_count, iovs, token, gss_context->hash_func);
u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
		   int message_count, rawobj_t *messages,
		   int iov_count, lnet_kiov_t *iovs,
		   rawobj_t *token, digest_hash hash_func)
	rawobj_t checksum = RAWOBJ_EMPTY;
	__u32 rc = GSS_S_FAILURE;

	checksum.len = cfs_crypto_hash_digestsize(algo);
	if (token->len < checksum.len) {
		CDEBUG(D_SEC, "Token too short: received %d, expected at least %d\n",
		       token->len, checksum.len);
		return GSS_S_DEFECTIVE_TOKEN;

	OBD_ALLOC_LARGE(checksum.data, checksum.len);

	if (sk_make_hmac(algo, key, message_count,
			 messages, iov_count, iovs, &checksum,
		CDEBUG(D_SEC, "Failed to create checksum to validate\n");

	if (memcmp(token->data, checksum.data, checksum.len)) {
		CERROR("checksum mismatch\n");

	OBD_FREE_LARGE(checksum.data, checksum.len);
/* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
 * encrypted pages in the bulk descriptor are populated, although only the
 * number of bytes actually specified by the sender (bd_nob) must be included
 * in the HMAC; otherwise the calculated HMAC will be incorrect. */
u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
			int msgcnt, rawobj_t *msgs, int iovcnt,
			lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
	rawobj_t checksum = RAWOBJ_EMPTY;
	struct ahash_request *req;
	struct scatterlist sg[1];

	checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
	if (token->len < checksum.len) {
		CDEBUG(D_SEC, "Token too short: received %d, expected at least %d\n",
		       token->len, checksum.len);
		return GSS_S_DEFECTIVE_TOKEN;

	OBD_ALLOC_LARGE(checksum.data, checksum.len);
		return GSS_S_FAILURE;

	req = cfs_crypto_hash_init(sc_hmac, key->data, key->len);

	for (i = 0; i < msgcnt; i++) {

		rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);

		ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
		rc = crypto_ahash_update(req);
			gss_teardown_sgtable(&sgt);

		gss_teardown_sgtable(&sgt);

	for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
		if (iovs[i].kiov_len == 0)

		bytes = min_t(int, iov_bytes, iovs[i].kiov_len);

		sg_init_table(sg, 1);
		sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
			    iovs[i].kiov_offset);
		ahash_request_set_crypt(req, sg, NULL, bytes);
		rc = crypto_ahash_update(req);

	cfs_crypto_hash_final(req, checksum.data, &checksum.len);

	if (memcmp(token->data, checksum.data, checksum.len))

	OBD_FREE_LARGE(checksum.data, checksum.len);
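
/*
 * Worked example (illustrative numbers): with a 16-byte block size, a final
 * bulk fragment of 100 bytes lives in an encrypted kiov of
 * sk_block_mask(100, 16) == 112 bytes, while the byte count passed in as
 * iov_bytes (desc->bd_nob at the call site) only covers 100 of them.  The
 * second loop above therefore hashes min_t(int, iov_bytes, kiov_len) == 100
 * bytes from that kiov and stops once iov_bytes is consumed, rather than
 * hashing every populated page in full.
 */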
__u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
			      message_count, messages, iov_count, iovs, token,
			      gss_context->hash_func);
__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		  rawobj_t *message, int message_buffer_length,
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	__u8 local_iv[SK_IV_SIZE];
	unsigned int blocksize;

	LASSERT(skc->sc_session_kb.kb_tfm);

	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
	if (gss_add_padding(message, message_buffer_length, blocksize))
		return GSS_S_FAILURE;

	memset(token->data, 0, token->len);

	if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
		return GSS_S_FAILURE;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(skh);
	memcpy(skw.skw_header.data, &skh, sizeof(skh));

	sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
		return GSS_S_FAILURE;

	/* HMAC covers the SK header, GSS header, and ciphertext */
	msgbufs[0] = skw.skw_header;
	msgbufs[1] = *gss_header;
	msgbufs[2] = skw.skw_cipher;

	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;
	if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key,
			 3, msgbufs, 0, NULL, &skw.skw_hmac,
			 gss_context->hash_func))
		return GSS_S_FAILURE;

	token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;

	return GSS_S_COMPLETE;
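
/*
 * Sizing sketch (an assumption about the caller, not from the original
 * source): the token buffer handed to gss_wrap_sk() must already be at least
 *
 *	sizeof(struct sk_hdr)
 *	+ sk_block_mask(message->len, blocksize)	padded ciphertext
 *	+ cfs_crypto_hash_digestsize(skc->sc_hmac)	trailing HMAC
 *
 * bytes long, because skw_cipher.len is derived from token->len minus the
 * header and HMAC sizes rather than from the message length itself.
 */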
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		    rawobj_t *token, rawobj_t *message)
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	__u8 local_iv[SK_IV_SIZE];
	unsigned int blocksize;

	LASSERT(skc->sc_session_kb.kb_tfm);

	if (token->len < sizeof(struct sk_hdr) + sht_bytes)
		return GSS_S_DEFECTIVE_TOKEN;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(struct sk_hdr);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;

	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
	if (skw.skw_cipher.len % blocksize != 0)
		return GSS_S_DEFECTIVE_TOKEN;

	skh = (struct sk_hdr *)skw.skw_header.data;
	rc = sk_verify_header(skh);
	if (rc != GSS_S_COMPLETE)

	/* HMAC covers the SK header, GSS header, and ciphertext */
	msgbufs[0] = skw.skw_header;
	msgbufs[1] = *gss_header;
	msgbufs[2] = skw.skw_cipher;
	rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
			    0, NULL, &skw.skw_hmac, gss_context->hash_func);

	sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
	message->len = skw.skw_cipher.len;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
			      1, &skw.skw_cipher, message, 0))
		return GSS_S_FAILURE;

	return GSS_S_COMPLETE;
__u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
		       struct ptlrpc_bulk_desc *desc)
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	LASSERT(skc->sc_session_kb.kb_tfm);
	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
			CERROR("offset %d not blocksize aligned\n",
			       BD_GET_KIOV(desc, i).kiov_offset);
			return GSS_S_FAILURE;

		BD_GET_ENC_KIOV(desc, i).kiov_offset =
			BD_GET_KIOV(desc, i).kiov_offset;
		BD_GET_ENC_KIOV(desc, i).kiov_len =
			sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);

	return GSS_S_COMPLETE;
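
/*
 * Illustrative effect of the loop above (assuming a 16-byte block size): a
 * full 4096-byte kiov keeps its length, while a trailing 100-byte kiov gets
 * an encrypted kiov of sk_block_mask(100, 16) == 112 bytes, so every
 * fragment's ciphertext is a whole number of cipher blocks.
 */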
static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
			     struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
	struct blkcipher_desc cdesc = {
	struct scatterlist ptxt;
	struct scatterlist ctxt;

	blocksize = crypto_blkcipher_blocksize(tfm);

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	for (i = 0; i < desc->bd_iov_count; i++) {
		sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
			    sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
			    BD_GET_KIOV(desc, i).kiov_offset);

		sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
			    ptxt.length, ptxt.offset);

		BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
		BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

		rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
			CERROR("failed to encrypt page: %d\n", rc);
static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
			     struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
	struct blkcipher_desc cdesc = {
	struct scatterlist ptxt;
	struct scatterlist ctxt;

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	blocksize = crypto_blkcipher_blocksize(tfm);
	if (desc->bd_nob_transferred % blocksize != 0) {
		CERROR("Transfer not a multiple of block size: %d\n",
		       desc->bd_nob_transferred);
		return GSS_S_DEFECTIVE_TOKEN;

	for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
		lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
		lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

		if (ciov->kiov_offset % blocksize != 0 ||
		    ciov->kiov_len % blocksize != 0) {
			CERROR("Invalid bulk descriptor vector\n");
			return GSS_S_DEFECTIVE_TOKEN;

		/* Must adjust the byte counts here because the actual sizes
		 * are only known after decryption.  Similar to what
		 * gss_cli_ctx_unwrap_bulk() does for integrity-only mode */

		/* cipher text must not exceed transferred size */
		if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
			ciov->kiov_len = desc->bd_nob_transferred - cnob;

		piov->kiov_len = ciov->kiov_len;

		/* plain text must not exceed bulk's size */
		if (ciov->kiov_len + pnob > desc->bd_nob)
			piov->kiov_len = desc->bd_nob - pnob;

		/* Taken from krb5_decrypt since it was not verified
		 * whether or not LNET guarantees these */
		if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
		    piov->kiov_len > ciov->kiov_len) {
			CERROR("Invalid decrypted length\n");
			return GSS_S_FAILURE;

		if (ciov->kiov_len == 0)

		sg_init_table(&ctxt, 1);
		sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,

		/* In the event the plain text size is not a multiple of
		 * blocksize, decrypt in place and copy the result after
		 * the decryption */
		if (piov->kiov_len % blocksize == 0)
			sg_assign_page(&ptxt, piov->kiov_page);

		rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
			CERROR("Decryption failed for page: %d\n", rc);
			return GSS_S_FAILURE;

		if (piov->kiov_len % blocksize != 0) {
			memcpy(page_address(piov->kiov_page) +
			       page_address(ciov->kiov_page) +

		cnob += ciov->kiov_len;
		pnob += piov->kiov_len;

	/* if needed, clear the remaining unused iovs */
	while (i < desc->bd_iov_count)
		BD_GET_KIOV(desc, i++).kiov_len = 0;

	if (unlikely(cnob != desc->bd_nob_transferred)) {
		CERROR("%d cipher text transferred but only %d decrypted\n",
		       desc->bd_nob_transferred, cnob);
		return GSS_S_FAILURE;

	if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
		CERROR("%d plain text expected but only %d received\n",
		       desc->bd_nob, pnob);
		return GSS_S_FAILURE;
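
/*
 * Worked example (illustrative numbers): with blocksize 16, bd_nob == 100
 * and bd_nob_transferred == 112, a single 112-byte ciphertext kiov passes
 * the first clamp untouched (it does not exceed the transferred size),
 * while the plaintext clamp shrinks piov->kiov_len to bd_nob - pnob == 100.
 * Since 100 is not a multiple of the block size, the data is decrypted in
 * place in the encrypted kiov and the first 100 bytes are copied back, so
 * the caller sees exactly the byte count the peer sent.
 */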
__u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
		       struct ptlrpc_bulk_desc *desc, rawobj_t *token,
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	__u8 local_iv[SK_IV_SIZE];

	LASSERT(skc->sc_session_kb.kb_tfm);

	memset(token->data, 0, token->len);
	if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
		return GSS_S_FAILURE;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(skh);
	memcpy(skw.skw_header.data, &skh, sizeof(skh));

	sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
			    desc, &skw.skw_cipher, adj_nob))
		return GSS_S_FAILURE;

	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;
	if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
			 desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac,
			 gss_context->hash_func))
		return GSS_S_FAILURE;

	return GSS_S_COMPLETE;
__u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
			 struct ptlrpc_bulk_desc *desc,
			 rawobj_t *token, int adj_nob)
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	__u8 local_iv[SK_IV_SIZE];

	LASSERT(skc->sc_session_kb.kb_tfm);

	if (token->len < sizeof(struct sk_hdr) + sht_bytes)
		return GSS_S_DEFECTIVE_TOKEN;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(struct sk_hdr);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;

	skh = (struct sk_hdr *)skw.skw_header.data;
	rc = sk_verify_header(skh);
	if (rc != GSS_S_COMPLETE)

	rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
				 &skw.skw_cipher, desc->bd_iov_count,
				 GET_ENC_KIOV(desc), desc->bd_nob,

	sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
	rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
			     desc, &skw.skw_cipher, adj_nob);

	return GSS_S_COMPLETE;
void gss_delete_sec_context_sk(void *internal_context)
{
	struct sk_ctx *sk_context = internal_context;

	sk_delete_context(sk_context);
}

int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
	return snprintf(buf, bufsize, "sk");
}
static struct gss_api_ops gss_sk_ops = {
	.gss_import_sec_context = gss_import_sec_context_sk,
	.gss_copy_reverse_context = gss_copy_reverse_context_sk,
	.gss_inquire_context = gss_inquire_context_sk,
	.gss_get_mic = gss_get_mic_sk,
	.gss_verify_mic = gss_verify_mic_sk,
	.gss_wrap = gss_wrap_sk,
	.gss_unwrap = gss_unwrap_sk,
	.gss_prep_bulk = gss_prep_bulk_sk,
	.gss_wrap_bulk = gss_wrap_bulk_sk,
	.gss_unwrap_bulk = gss_unwrap_bulk_sk,
	.gss_delete_sec_context = gss_delete_sec_context_sk,
	.gss_display = gss_display_sk,
};
static struct subflavor_desc gss_sk_sfs[] = {
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKN,
		.sf_service = SPTLRPC_SVC_NULL,
	},
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKA,
		.sf_service = SPTLRPC_SVC_AUTH,
	},
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKI,
		.sf_service = SPTLRPC_SVC_INTG,
	},
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKPI,
		.sf_service = SPTLRPC_SVC_PRIV,
	},
};

static struct gss_api_mech gss_sk_mech = {
	/* .gm_owner uses the default NULL value for THIS_MODULE */
	.gm_oid = (rawobj_t) {
		.data = "\053\006\001\004\001\311\146\215\126\001\000\001",
	},
	.gm_ops = &gss_sk_ops,
	.gm_sfs = gss_sk_sfs,
};
int __init init_sk_module(void)
{
	int status;

	status = lgss_mech_register(&gss_sk_mech);
	if (status)
		CERROR("Failed to register sk gss mechanism!\n");

	return status;
}

void cleanup_sk_module(void)
{
	lgss_mech_unregister(&gss_sk_mech);
}