/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (C) 2013, 2015, Trustees of Indiana University
 *
 * Copyright (c) 2014, 2016, Intel Corporation.
 *
 * Author: Jeremy Filizetti <jfilizet@iu.edu>
 * Author: Andrew Korty <ajk@iu.edu>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#include <crypto/ctr.h>

#include <libcfs/libcfs_crypto.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_user.h>

#include "gss_crypto.h"
#include "gss_internal.h"

#define SK_INTERFACE_VERSION 1
#define SK_MSG_VERSION 1

/* Starting number for reverse contexts. It is critical to security
 * that reverse contexts use a different range of numbers than regular
 * contexts because they are using the same key. Therefore the IV/nonce
 * combination must be unique for them. To accomplish this, reverse
 * contexts use the negative range of a 64-bit number and regular
 * contexts use the positive range. If the same IV/nonce combination
 * were reused it would leak information about the plaintext. */
#define SK_IV_REV_START (1ULL << 63)

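/*
 * Illustrative only (not part of the original interface): regular
 * contexts count up from 0 and reverse contexts count up from
 * SK_IV_REV_START, so the top bit of the IV distinguishes the two
 * ranges. sk_is_reverse_iv() is a hypothetical helper shown purely to
 * make the partitioning concrete.
 */
static inline bool sk_is_reverse_iv(__u64 iv)
{
	return (iv & SK_IV_REV_START) != 0;
}
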
	struct gss_keyblock	sc_session_kb;
	enum cfs_crypto_hash_alg sc_hmac;
};

struct sk_hdr {
	__u64			skh_version;
	__u64			skh_iv;
} __attribute__((packed));

/* The format of SK wire data is similar to that of the RFC 3686 ESP
 * payload (section 3), except that instead of just an IV there is a
 * struct sk_hdr.
 * ---------------------------------------------------------------------
 * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
 * --------------------------------------------------------------------- */
struct sk_wire {
	rawobj_t		skw_header;
	rawobj_t		skw_cipher;
	rawobj_t		skw_hmac;
};

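/*
 * Illustrative sketch (hypothetical helper, not in the original code):
 * how the three wire regions are carved out of a token that holds the
 * full payload. The wrap/unwrap paths below open-code exactly these
 * assignments.
 */
static inline void sk_wire_split(rawobj_t *token, size_t hmac_len,
				 struct sk_wire *skw)
{
	skw->skw_header.data = token->data;
	skw->skw_header.len = sizeof(struct sk_hdr);
	skw->skw_cipher.data = skw->skw_header.data + skw->skw_header.len;
	skw->skw_cipher.len = token->len - skw->skw_header.len - hmac_len;
	skw->skw_hmac.data = skw->skw_cipher.data + skw->skw_cipher.len;
	skw->skw_hmac.len = hmac_len;
}
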
static struct sk_crypt_type sk_crypt_types[] = {
	[SK_CRYPT_AES256_CTR] = {
		.cht_name = "ctr(aes)",
	},
};

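/* Round len up to the next multiple of blocksize, which must be a power
 * of two. For example, with 16-byte AES blocks, sk_block_mask(20, 16)
 * returns 32 and sk_block_mask(32, 16) returns 32. */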
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	return (len + blocksize - 1) & (~(blocksize - 1));
}

static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
{
	__u64 tmp_iv;

	skh->skh_version = cpu_to_be64(SK_MSG_VERSION);

	/* Always use inc_return so the initial values (0 and
	 * SK_IV_REV_START) are never handed out; seeing them after the
	 * increment means the counter has wrapped. */
	tmp_iv = atomic64_inc_return(&skc->sc_iv);
	skh->skh_iv = cpu_to_be64(tmp_iv);
	if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
		CERROR("Counter looped, connection must be reset to avoid plaintext information\n");
		return GSS_S_FAILURE;
	}

	return GSS_S_COMPLETE;
}

static int sk_verify_header(struct sk_hdr *skh)
{
	if (be64_to_cpu(skh->skh_version) != SK_MSG_VERSION)
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

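/* Build the 16-byte counter block described in RFC 3686 section 4:
 * bytes 0-3 hold the nonce (the host or peer random), bytes 4-11 hold
 * the per-message IV from struct sk_hdr, and bytes 12-15 hold the block
 * counter, which starts at 1 in big-endian form. */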
void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
{
	__u32 ctr = cpu_to_be32(1);

	memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
	iv += CTR_RFC3686_IV_SIZE;
	memcpy(iv, &ctr, sizeof(ctr));
}

static int sk_init_keys(struct sk_ctx *skc)
{
	return gss_keyblock_init(&skc->sc_session_kb,
				 sk_crypt_types[skc->sc_crypt].cht_name, 0);
}

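/*
 * Layout of the context buffer produced by the userspace side (see
 * sk_serialize_kctx()), as consumed by sk_fill_context() below; field
 * widths follow the reads performed on each field:
 *
 *   1. interface version (__u32)
 *   2. HMAC algorithm type
 *   3. crypt algorithm type
 *   4. expiration time delta in seconds
 *   5. host random (nonce for encryption)
 *   6. peer random (nonce for decryption)
 *   7. HMAC key (rawobj)
 *   8. session key (rawobj, empty when privacy mode is not used)
 */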
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
	char *ptr = inbuf->data;
	char *end = inbuf->data + inbuf->len;
	__u32 tmp;

	/* see sk_serialize_kctx() for format from userspace side */
	/* 1. interface version */
	if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
		CERROR("Failed to read shared key interface version\n");
		return GSS_S_FAILURE;
	}
	if (tmp != SK_INTERFACE_VERSION) {
		CERROR("Invalid shared key interface version: %d\n", tmp);
		return GSS_S_FAILURE;
	}

	/* 2. HMAC type */
	if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
		CERROR("Failed to read HMAC algorithm type\n");
		return GSS_S_FAILURE;
	}
	if (skc->sc_hmac >= CFS_HASH_ALG_MAX) {
		CERROR("Invalid HMAC type: %d\n", skc->sc_hmac);
		return GSS_S_FAILURE;
	}

	/* 3. crypt type */
	if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
		CERROR("Failed to read crypt algorithm type\n");
		return GSS_S_FAILURE;
	}
	if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) {
		CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
		return GSS_S_FAILURE;
	}

	/* 4. expiration time */
	if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
		CERROR("Failed to read context expiration time\n");
		return GSS_S_FAILURE;
	}
	skc->sc_expire = tmp + cfs_time_current_sec();

	/* 5. host random is used as nonce for encryption */
	if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
			  sizeof(skc->sc_host_random))) {
		CERROR("Failed to read host random\n");
		return GSS_S_FAILURE;
	}

	/* 6. peer random is used as nonce for decryption */
	if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
			  sizeof(skc->sc_peer_random))) {
		CERROR("Failed to read peer random\n");
		return GSS_S_FAILURE;
	}

	/* 7. HMAC key */
	if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
		CERROR("Failed to read HMAC key\n");
		return GSS_S_FAILURE;
	}
	if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
		CERROR("HMAC key must be larger than %d bytes\n",
		       SK_MIN_SIZE);
		return GSS_S_FAILURE;
	}

	/* 8. Session key, can be empty if not using privacy mode */
	if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
		CERROR("Failed to read session key\n");
		return GSS_S_FAILURE;
	}

	return GSS_S_COMPLETE;
}

static void sk_delete_context(struct sk_ctx *skc)
{
	if (!skc)
		return;

	rawobj_free(&skc->sc_hmac_key);
	gss_keyblock_free(&skc->sc_session_kb);
	OBD_FREE_PTR(skc);
}

__u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
{
	struct sk_ctx *skc;
	bool privacy = false;

	if (inbuf == NULL || inbuf->data == NULL)
		return GSS_S_FAILURE;

	OBD_ALLOC_PTR(skc);
	if (!skc)
		return GSS_S_FAILURE;

	atomic64_set(&skc->sc_iv, 0);

	if (sk_fill_context(inbuf, skc))
		goto out_err;

	/* Only privacy mode needs to initialize keys */
	if (skc->sc_session_kb.kb_key.len > 0) {
		privacy = true;
		if (sk_init_keys(skc))
			goto out_err;
	}

	gss_context->internal_ctx_id = skc;
	CDEBUG(D_SEC, "successfully imported sk%s context\n",
	       privacy ? "pi" : "i");

	return GSS_S_COMPLETE;

out_err:
	sk_delete_context(skc);
	return GSS_S_FAILURE;
}

__u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
				  struct gss_ctx *gss_context_new)
{
	struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
	struct sk_ctx *skc_new;

	OBD_ALLOC_PTR(skc_new);
	if (!skc_new)
		return GSS_S_FAILURE;

	skc_new->sc_hmac = skc_old->sc_hmac;
	skc_new->sc_crypt = skc_old->sc_crypt;
	skc_new->sc_expire = skc_old->sc_expire;
	skc_new->sc_host_random = skc_old->sc_host_random;
	skc_new->sc_peer_random = skc_old->sc_peer_random;

	atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);

	if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))
		goto out_err;
	if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
		goto out_err;

	/* Only privacy mode needs to initialize keys */
	if (skc_new->sc_session_kb.kb_key.len > 0)
		if (sk_init_keys(skc_new))
			goto out_err;

	gss_context_new->internal_ctx_id = skc_new;
	CDEBUG(D_SEC, "successfully copied reverse sk context\n");

	return GSS_S_COMPLETE;

out_err:
	sk_delete_context(skc_new);
	return GSS_S_FAILURE;
}

__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
			     unsigned long *endtime)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	*endtime = skc->sc_expire;
	return GSS_S_COMPLETE;
}

__u32 sk_make_hmac(const char *alg_name, rawobj_t *key, int msg_count,
		   rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
		   rawobj_t *token)
{
	struct crypto_ahash *tfm;
	int rc = GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return GSS_S_FAILURE;

	LASSERT(token->len >= crypto_ahash_digestsize(tfm));
	if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
			     token))
		rc = GSS_S_COMPLETE;

	crypto_free_ahash(tfm);
	return rc;
}

__u32 gss_get_mic_sk(struct gss_ctx *gss_context,
		     int message_count,
		     rawobj_t *messages,
		     int iov_count,
		     lnet_kiov_t *iovs,
		     rawobj_t *token)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	return sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac),
			    &skc->sc_hmac_key, message_count, messages,
			    iov_count, iovs, token);
}

u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
		   int message_count, rawobj_t *messages, int iov_count,
		   lnet_kiov_t *iovs, rawobj_t *token)
{
	rawobj_t checksum = RAWOBJ_EMPTY;
	__u32 rc = GSS_S_FAILURE;

	checksum.len = cfs_crypto_hash_digestsize(algo);
	/* FIXME: checksum.len == 0 (unknown algorithm) is not rejected */
	if (token->len < checksum.len) {
		CDEBUG(D_SEC, "Token received too short, expected %d received %d\n",
		       checksum.len, token->len);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	OBD_ALLOC_LARGE(checksum.data, checksum.len);
	if (!checksum.data)
		return rc;

	if (sk_make_hmac(cfs_crypto_hash_name(algo), key, message_count,
			 messages, iov_count, iovs, &checksum)) {
		CDEBUG(D_SEC, "Failed to create checksum to validate\n");
		goto cleanup;
	}

	if (memcmp(token->data, checksum.data, checksum.len)) {
		CERROR("checksum mismatch\n");
		rc = GSS_S_BAD_SIG;
		goto cleanup;
	}

	rc = GSS_S_COMPLETE;
cleanup:
	OBD_FREE_LARGE(checksum.data, checksum.len);
	return rc;
}

/* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
 * encrypted pages in the bulk descriptor are populated, even though only the
 * number of bytes actually sent by the peer (bd_nob) must be included in the
 * HMAC; hashing the full pages would yield an incorrect HMAC. */
__u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac,
			  rawobj_t *key, int msgcnt, rawobj_t *msgs,
			  int iovcnt, lnet_kiov_t *iovs, int iov_bytes,
			  rawobj_t *token)
{
	rawobj_t checksum = RAWOBJ_EMPTY;
	struct cfs_crypto_hash_desc *hdesc;
	int rc = GSS_S_FAILURE, i;

	checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
	if (token->len < checksum.len) {
		CDEBUG(D_SEC, "Token received too short, expected %d received %d\n",
		       checksum.len, token->len);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	OBD_ALLOC_LARGE(checksum.data, checksum.len);
	if (!checksum.data)
		return rc;

	for (i = 0; i < msgcnt; i++) {
		if (!msgs[i].len)
			continue;

		rc = cfs_crypto_hash_digest(sc_hmac, msgs[i].data, msgs[i].len,
					    key->data, key->len,
					    checksum.data, &checksum.len);
		if (rc)
			goto cleanup;
	}

	hdesc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
	if (IS_ERR(hdesc)) {
		rc = GSS_S_FAILURE;
		goto cleanup;
	}

	for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
		int bytes;

		if (iovs[i].kiov_len == 0)
			continue;

		bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
		iov_bytes -= bytes;
		rc = cfs_crypto_hash_update_page(hdesc, iovs[i].kiov_page,
						 iovs[i].kiov_offset, bytes);
		if (rc)
			goto cleanup;
	}

	rc = cfs_crypto_hash_final(hdesc, checksum.data, &checksum.len);
	if (rc)
		goto cleanup;

	if (memcmp(token->data, checksum.data, checksum.len)) {
		rc = GSS_S_BAD_SIG;
		goto cleanup;
	}

	rc = GSS_S_COMPLETE;
cleanup:
	OBD_FREE_LARGE(checksum.data, checksum.len);
	return rc;
}

__u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
			int message_count,
			rawobj_t *messages,
			int iov_count,
			lnet_kiov_t *iovs,
			rawobj_t *token)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
			      message_count, messages, iov_count, iovs, token);
}

__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		  rawobj_t *message, int message_buffer_length,
		  rawobj_t *token)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	struct sk_wire skw;
	struct sk_hdr skh;
	rawobj_t msgbufs[3];
	__u8 local_iv[SK_IV_SIZE];
	unsigned int blocksize;

	LASSERT(skc->sc_session_kb.kb_tfm);

	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
	if (gss_add_padding(message, message_buffer_length, blocksize))
		return GSS_S_FAILURE;

	memset(token->data, 0, token->len);

	if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
		return GSS_S_FAILURE;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(skh);
	memcpy(skw.skw_header.data, &skh, sizeof(skh));

	sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
			      &skw.skw_cipher, 1))
		return GSS_S_FAILURE;

	/* HMAC covers the SK header, GSS header, and ciphertext */
	msgbufs[0] = skw.skw_header;
	msgbufs[1] = *gss_header;
	msgbufs[2] = skw.skw_cipher;

	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;
	if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
			 3, msgbufs, 0, NULL, &skw.skw_hmac))
		return GSS_S_FAILURE;

	token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;

	return GSS_S_COMPLETE;
}

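/* Note on sizing: gss_wrap_sk() above derives the ciphertext region from
 * token->len, so the caller must size the token for the SK header, the
 * blocksize-padded message, and the HMAC digest. */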
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		    rawobj_t *token, rawobj_t *message)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	struct sk_wire skw;
	struct sk_hdr *skh;
	rawobj_t msgbufs[3];
	__u8 local_iv[SK_IV_SIZE];
	unsigned int blocksize;
	int rc;

	LASSERT(skc->sc_session_kb.kb_tfm);

	if (token->len < sizeof(struct sk_hdr) + sht_bytes)
		return GSS_S_DEFECTIVE_TOKEN;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(struct sk_hdr);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;

	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
	if (skw.skw_cipher.len % blocksize != 0)
		return GSS_S_DEFECTIVE_TOKEN;

	skh = (struct sk_hdr *)skw.skw_header.data;
	rc = sk_verify_header(skh);
	if (rc != GSS_S_COMPLETE)
		return rc;

	/* HMAC covers the SK header, GSS header, and ciphertext */
	msgbufs[0] = skw.skw_header;
	msgbufs[1] = *gss_header;
	msgbufs[2] = skw.skw_cipher;
	rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
			    0, NULL, &skw.skw_hmac);
	if (rc)
		return rc;

	sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
	message->len = skw.skw_cipher.len;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
			      1, &skw.skw_cipher, message, 0))
		return GSS_S_FAILURE;

	return GSS_S_COMPLETE;
}

__u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
		       struct ptlrpc_bulk_desc *desc)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	int blocksize;
	int i;

	LASSERT(skc->sc_session_kb.kb_tfm);
	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
			CERROR("offset %d not blocksize aligned\n",
			       BD_GET_KIOV(desc, i).kiov_offset);
			return GSS_S_FAILURE;
		}

		BD_GET_ENC_KIOV(desc, i).kiov_offset =
			BD_GET_KIOV(desc, i).kiov_offset;
		BD_GET_ENC_KIOV(desc, i).kiov_len =
			sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
	}

	return GSS_S_COMPLETE;
}

static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
			     struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
			     int adj_nob)
{
	struct blkcipher_desc cdesc = {
		.tfm = tfm,
		.info = iv,
		.flags = 0,
	};
	struct scatterlist ptxt;
	struct scatterlist ctxt;
	int blocksize;
	int i;
	int rc;
	int nob = 0;

	blocksize = crypto_blkcipher_blocksize(tfm);

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	for (i = 0; i < desc->bd_iov_count; i++) {
		sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
			    sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
					  blocksize),
			    BD_GET_KIOV(desc, i).kiov_offset);
		nob += ptxt.length;

		sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
			    ptxt.length, ptxt.offset);

		BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
		BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

		rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
						 ptxt.length);
		if (rc) {
			CERROR("failed to encrypt page: %d\n", rc);
			return rc;
		}
	}

	if (adj_nob)
		desc->bd_nob = nob;

	return 0;
}

static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
			     struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
			     int adj_nob)
{
	struct blkcipher_desc cdesc = {
		.tfm = tfm,
		.info = iv,
		.flags = 0,
	};
	struct scatterlist ptxt;
	struct scatterlist ctxt;
	int blocksize;
	int i;
	int rc;
	int pnob = 0;
	int cnob = 0;

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	blocksize = crypto_blkcipher_blocksize(tfm);
	if (desc->bd_nob_transferred % blocksize != 0) {
		CERROR("Transfer not a multiple of block size: %d\n",
		       desc->bd_nob_transferred);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
	     i++) {
		lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
		lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

		if (ciov->kiov_offset % blocksize != 0 ||
		    ciov->kiov_len % blocksize != 0) {
			CERROR("Invalid bulk descriptor vector\n");
			return GSS_S_DEFECTIVE_TOKEN;
		}

		/* Must adjust bytes here because we know the actual sizes
		 * after decryption. Similar to what
		 * gss_cli_ctx_unwrap_bulk() does for integrity-only mode. */
		if (adj_nob) {
			/* cipher text must not exceed transferred size */
			if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
				ciov->kiov_len =
					desc->bd_nob_transferred - cnob;

			piov->kiov_len = ciov->kiov_len;

			/* plain text must not exceed bulk's size */
			if (ciov->kiov_len + pnob > desc->bd_nob)
				piov->kiov_len = desc->bd_nob - pnob;
		} else {
			/* Taken from krb5_decrypt since it was not verified
			 * whether or not LNET guarantees these */
			if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
			    piov->kiov_len > ciov->kiov_len) {
				CERROR("Invalid decrypted length\n");
				return GSS_S_FAILURE;
			}
		}

		if (ciov->kiov_len == 0)
			continue;

		sg_init_table(&ctxt, 1);
		sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
			    ciov->kiov_offset);
		ptxt = ctxt;

		/* In the event the plain text size is not a multiple
		 * of blocksize we decrypt in place and copy the result
		 * after the decryption */
		if (piov->kiov_len % blocksize == 0)
			sg_assign_page(&ptxt, piov->kiov_page);

		rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
						 ctxt.length);
		if (rc) {
			CERROR("Decryption failed for page: %d\n", rc);
			return GSS_S_FAILURE;
		}

		if (piov->kiov_len % blocksize != 0) {
			memcpy(page_address(piov->kiov_page) +
			       piov->kiov_offset,
			       page_address(ciov->kiov_page) +
			       ciov->kiov_offset,
			       piov->kiov_len);
		}

		cnob += ciov->kiov_len;
		pnob += piov->kiov_len;
	}

	/* if needed, clear up the rest unused iovs */
	if (adj_nob)
		while (i < desc->bd_iov_count)
			BD_GET_KIOV(desc, i++).kiov_len = 0;

	if (unlikely(cnob != desc->bd_nob_transferred)) {
		CERROR("%d cipher text transferred but only %d decrypted\n",
		       desc->bd_nob_transferred, cnob);
		return GSS_S_FAILURE;
	}

	if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
		CERROR("%d plain text expected but only %d received\n",
		       desc->bd_nob, pnob);
		return GSS_S_FAILURE;
	}

	return 0;
}

__u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
		       struct ptlrpc_bulk_desc *desc, rawobj_t *token,
		       int adj_nob)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	struct sk_wire skw;
	struct sk_hdr skh;
	__u8 local_iv[SK_IV_SIZE];

	LASSERT(skc->sc_session_kb.kb_tfm);

	memset(token->data, 0, token->len);
	if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
		return GSS_S_FAILURE;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(skh);
	memcpy(skw.skw_header.data, &skh, sizeof(skh));

	sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
			    desc, &skw.skw_cipher, adj_nob))
		return GSS_S_FAILURE;

	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;
	if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
			 1, &skw.skw_cipher, desc->bd_iov_count,
			 GET_ENC_KIOV(desc), &skw.skw_hmac))
		return GSS_S_FAILURE;

	return GSS_S_COMPLETE;
}

__u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
			 struct ptlrpc_bulk_desc *desc,
			 rawobj_t *token, int adj_nob)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;
	size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
	struct sk_wire skw;
	struct sk_hdr *skh;
	__u8 local_iv[SK_IV_SIZE];
	int rc;

	LASSERT(skc->sc_session_kb.kb_tfm);

	if (token->len < sizeof(struct sk_hdr) + sht_bytes)
		return GSS_S_DEFECTIVE_TOKEN;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(struct sk_hdr);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht_bytes;

	skh = (struct sk_hdr *)skw.skw_header.data;
	rc = sk_verify_header(skh);
	if (rc != GSS_S_COMPLETE)
		return rc;

	rc = sk_verify_bulk_hmac(skc->sc_hmac,
				 &skc->sc_hmac_key, 1, &skw.skw_cipher,
				 desc->bd_iov_count, GET_ENC_KIOV(desc),
				 desc->bd_nob, &skw.skw_hmac);
	if (rc)
		return rc;

	sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
	rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
			     desc, &skw.skw_cipher, adj_nob);
	if (rc)
		return rc;

	return GSS_S_COMPLETE;
}

void gss_delete_sec_context_sk(void *internal_context)
{
	struct sk_ctx *sk_context = internal_context;

	sk_delete_context(sk_context);
}

int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
	return snprintf(buf, bufsize, "sk");
}

static struct gss_api_ops gss_sk_ops = {
	.gss_import_sec_context = gss_import_sec_context_sk,
	.gss_copy_reverse_context = gss_copy_reverse_context_sk,
	.gss_inquire_context = gss_inquire_context_sk,
	.gss_get_mic = gss_get_mic_sk,
	.gss_verify_mic = gss_verify_mic_sk,
	.gss_wrap = gss_wrap_sk,
	.gss_unwrap = gss_unwrap_sk,
	.gss_prep_bulk = gss_prep_bulk_sk,
	.gss_wrap_bulk = gss_wrap_bulk_sk,
	.gss_unwrap_bulk = gss_unwrap_bulk_sk,
	.gss_delete_sec_context = gss_delete_sec_context_sk,
	.gss_display = gss_display_sk,
};

static struct subflavor_desc gss_sk_sfs[] = {
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKN,
		.sf_qop = 0,
		.sf_service = SPTLRPC_SVC_NULL,
		.sf_name = "skn"
	},
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKA,
		.sf_qop = 0,
		.sf_service = SPTLRPC_SVC_AUTH,
		.sf_name = "ska"
	},
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKI,
		.sf_qop = 0,
		.sf_service = SPTLRPC_SVC_INTG,
		.sf_name = "ski"
	},
	{
		.sf_subflavor = SPTLRPC_SUBFLVR_SKPI,
		.sf_qop = 0,
		.sf_service = SPTLRPC_SVC_PRIV,
		.sf_name = "skpi"
	},
};

static struct gss_api_mech gss_sk_mech = {
	/* .gm_owner uses default NULL value for THIS_MODULE */
	.gm_name = "sk",
	.gm_oid = (rawobj_t) {
		.len = 12,
		.data = "\053\006\001\004\001\311\146\215\126\001\000\001",
	},
	.gm_ops = &gss_sk_ops,
	.gm_sf_num = 4,
	.gm_sfs = gss_sk_sfs,
};

int __init init_sk_module(void)
{
	int status;

	status = lgss_mech_register(&gss_sk_mech);
	if (status)
		CERROR("Failed to register sk gss mechanism!\n");

	return status;
}

void cleanup_sk_module(void)
{
	lgss_mech_unregister(&gss_sk_mech);
}