4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (C) 2013, 2015, Trustees of Indiana University
25 * Copyright (c) 2014, 2016, Intel Corporation.
27 * Author: Jeremy Filizetti <jfilizet@iu.edu>
28 * Author: Andrew Korty <ajk@iu.edu>
31 #define DEBUG_SUBSYSTEM S_SEC
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/crypto.h>
36 #include <linux/mutex.h>
37 #include <crypto/ctr.h>
40 #include <obd_class.h>
41 #include <obd_support.h>
44 #include "gss_crypto.h"
45 #include "gss_internal.h"
49 #define SK_INTERFACE_VERSION 1
50 #define SK_MSG_VERSION 1
54 /* Starting number for reverse contexts. It is critical to security
55 * that reverse contexts use a different range of numbers than regular
56 * contexts because they are using the same key. Therefore the IV/nonce
57 * combination must be unique for them. To accomplish this reverse contexts
58 * use the negative range of a 64-bit number and regular contexts use the
59 * positive range. If the same IV/nonce combination were reused it would leak
60 * information about the plaintext. */
61 #define SK_IV_REV_START (1ULL << 63)
/* NOTE(review): interior of struct sk_ctx; the opening declaration and
 * several members (IV counter, randoms, expiry, HMAC key) appear to be
 * missing from this view of the file -- confirm against the full source. */
64 enum cfs_crypto_crypt_alg sc_crypt; /* bulk cipher used in privacy mode */
65 enum cfs_crypto_hash_alg sc_hmac;   /* HMAC algorithm for integrity */
71 struct gss_keyblock sc_session_kb;  /* session key; empty unless privacy */
77 } __attribute__((packed));
79 /* The format of SK wire data is similar to that of RFC3686 ESP Payload
80 * (section 3) except instead of just an IV there is a struct sk_hdr.
81 * ---------------------------------------------------------------------
82 * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
83 * --------------------------------------------------------------------- */
/* Round @len up to the next multiple of @blocksize.
 * @blocksize must be a power of two (cipher block sizes always are). */
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	unsigned long mask = (unsigned long)blocksize - 1;

	return (len + mask) & ~mask;
}
95 static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
98 skh->skh_version = be64_to_cpu(SK_MSG_VERSION);
100 /* Always using inc_return so we don't use our initial numbers which
101 * could be the reuse detecting numbers */
102 tmp_iv = atomic64_inc_return(&skc->sc_iv);
103 skh->skh_iv = be64_to_cpu(tmp_iv);
104 if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
105 CERROR("Counter looped, connection must be reset to avoid "
106 "plaintext information\n");
107 return GSS_S_FAILURE;
110 return GSS_S_COMPLETE;
113 static int sk_verify_header(struct sk_hdr *skh)
115 if (cpu_to_be64(skh->skh_version) != SK_MSG_VERSION)
116 return GSS_S_DEFECTIVE_TOKEN;
118 return GSS_S_COMPLETE;
121 void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
123 __u32 ctr = cpu_to_be32(1);
125 memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
126 iv += CTR_RFC3686_NONCE_SIZE;
127 memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
128 iv += CTR_RFC3686_IV_SIZE;
129 memcpy(iv, &ctr, sizeof(ctr));
/* Parse a serialized shared-key context received from userspace into @skc.
 * Fields are consumed in order: interface version, HMAC algorithm name,
 * crypt algorithm name, expiration delta, host random, peer random,
 * HMAC key, and (optionally empty) session key.
 * NOTE(review): several error-return lines and the declaration of `tmp`
 * appear to be missing from this view of the file. */
132 static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
134 char *ptr = inbuf->data;
135 char *end = inbuf->data + inbuf->len;
136 char sk_hmac[CRYPTO_MAX_ALG_NAME];
137 char sk_crypt[CRYPTO_MAX_ALG_NAME];
140 /* see sk_serialize_kctx() for format from userspace side */
142 if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
143 CERROR("Failed to read shared key interface version\n");
146 if (tmp != SK_INTERFACE_VERSION) {
147 CERROR("Invalid shared key interface version: %d\n", tmp);
/* Only the null, SHA256 and SHA512 HMACs are acceptable for this mech. */
152 if (gss_get_bytes(&ptr, end, &sk_hmac, sizeof(sk_hmac))) {
153 CERROR("Failed to read HMAC algorithm type\n");
157 skc->sc_hmac = cfs_crypto_hash_alg(sk_hmac);
158 if (skc->sc_hmac != CFS_HASH_ALG_NULL &&
159 skc->sc_hmac != CFS_HASH_ALG_SHA256 &&
160 skc->sc_hmac != CFS_HASH_ALG_SHA512) {
161 CERROR("Invalid hmac type: %s\n", sk_hmac);
166 if (gss_get_bytes(&ptr, end, &sk_crypt, sizeof(sk_crypt))) {
167 CERROR("Failed to read crypt algorithm type\n");
171 skc->sc_crypt = cfs_crypto_crypt_alg(sk_crypt);
172 if (skc->sc_crypt == CFS_CRYPT_ALG_UNKNOWN) {
173 CERROR("Invalid crypt type: %s\n", sk_crypt);
177 /* 4. expiration time */
178 if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
179 CERROR("Failed to read context expiration time\n");
/* Userspace sends a relative lifetime; convert to an absolute deadline. */
182 skc->sc_expire = tmp + ktime_get_real_seconds();
184 /* 5. host random is used as nonce for encryption */
185 if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
186 sizeof(skc->sc_host_random))) {
187 CERROR("Failed to read host random\n");
191 /* 6. peer random is used as nonce for decryption */
192 if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
193 sizeof(skc->sc_peer_random))) {
194 CERROR("Failed to read peer random\n");
199 if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
200 CERROR("Failed to read HMAC key\n");
203 if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
/* NOTE(review): duplicated words in this error string ("must key must") --
 * fix when the full function body is available. */
204 CERROR("HMAC key must key must be larger than %d bytes\n",
209 /* 8. Session key, can be empty if not using privacy mode */
210 if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
211 CERROR("Failed to read session key\n");
/* Free all resources owned by a shared-key context.
 * NOTE(review): the NULL guard and the final OBD_FREE_PTR(skc) appear to be
 * missing from this view of the file -- confirm against the full source. */
218 static void sk_delete_context(struct sk_ctx *skc)
223 rawobj_free(&skc->sc_hmac_key);
224 gss_keyblock_free(&skc->sc_session_kb);
/* GSS mech entry: import a serialized SK context from userspace.
 * Allocates a struct sk_ctx, parses @inbuf via sk_fill_context(), and for
 * privacy mode (non-empty session key) initializes the cipher keyblock.
 * On any failure the partially built context is torn down.
 * NOTE(review): the skc allocation, several goto targets and the
 * `privacy = true` assignment appear to be missing from this view. */
229 __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
232 bool privacy = false;
234 if (inbuf == NULL || inbuf->data == NULL)
235 return GSS_S_FAILURE;
239 return GSS_S_FAILURE;
/* Start the IV counter at 0; regular contexts use the positive IV range. */
241 atomic64_set(&skc->sc_iv, 0);
243 if (sk_fill_context(inbuf, skc))
246 /* Only privacy mode needs to initialize keys */
247 if (skc->sc_session_kb.kb_key.len > 0) {
249 if (gss_keyblock_init(&skc->sc_session_kb,
250 cfs_crypto_crypt_name(skc->sc_crypt), 0))
254 gss_context->internal_ctx_id = skc;
255 CDEBUG(D_SEC, "successfully imported sk%s context\n",
256 privacy ? " (with privacy)" : "");
258 return GSS_S_COMPLETE;
261 sk_delete_context(skc);
262 return GSS_S_FAILURE;
/* GSS mech entry: build a reverse (server->client) context from an
 * existing one. All parameters are copied; the IV counter starts at
 * SK_IV_REV_START so reverse contexts draw IVs from the negative half of
 * the 64-bit space and never collide with the forward context's IVs
 * (critical: both directions share the same key).
 * NOTE(review): some error-goto lines appear to be missing from this view. */
266 __u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
267 struct gss_ctx *gss_context_new)
269 struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
270 struct sk_ctx *skc_new;
272 OBD_ALLOC_PTR(skc_new);
274 return GSS_S_FAILURE;
276 skc_new->sc_hmac = skc_old->sc_hmac;
277 skc_new->sc_crypt = skc_old->sc_crypt;
278 skc_new->sc_expire = skc_old->sc_expire;
279 skc_new->sc_host_random = skc_old->sc_host_random;
280 skc_new->sc_peer_random = skc_old->sc_peer_random;
282 atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);
284 if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))
286 if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
289 /* Only privacy mode needs to initialize keys */
290 if (skc_new->sc_session_kb.kb_key.len > 0)
291 if (gss_keyblock_init(&skc_new->sc_session_kb,
292 cfs_crypto_crypt_name(skc_new->sc_crypt),
296 gss_context_new->internal_ctx_id = skc_new;
297 CDEBUG(D_SEC, "successfully copied reverse sk context\n");
299 return GSS_S_COMPLETE;
302 sk_delete_context(skc_new);
303 return GSS_S_FAILURE;
/* GSS mech entry: report the context's absolute expiration time.
 * NOTE(review): the second parameter line (presumably `time64_t *endtime`)
 * is missing from this view of the file. */
307 __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
310 struct sk_ctx *skc = gss_context->internal_ctx_id;
312 *endtime = skc->sc_expire;
313 return GSS_S_COMPLETE;
/* Compute an HMAC over @msg_count rawobj buffers plus @iov_count bio_vecs
 * and store it in @token. Uses the caller-supplied @hash_func when set,
 * falling back to gss_digest_hash() otherwise.
 * NOTE(review): declarations of rc/rc2 and some control flow appear to be
 * missing from this view of the file. */
317 u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
318 rawobj_t *msgs, int iov_count, struct bio_vec *iovs,
319 rawobj_t *token, digest_hash hash_func)
321 struct ahash_request *req;
324 req = cfs_crypto_hash_init(algo, key->data, key->len);
327 goto out_init_failed;
332 rc2 = hash_func(req, NULL, msg_count, msgs, iov_count,
335 rc2 = gss_digest_hash(req, NULL, msg_count, msgs, iov_count,
/* hash_final also frees the request, so it runs on every path. */
338 rc = cfs_crypto_hash_final(req, token->data, &token->len);
342 return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
/* GSS mech entry: produce a MIC (integrity token) for the given message
 * buffers using the context's HMAC algorithm and key.
 * NOTE(review): some parameter lines (message_count, messages, token) are
 * missing from this view of the file. */
346 __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
350 struct bio_vec *iovs,
353 struct sk_ctx *skc = gss_context->internal_ctx_id;
355 return sk_make_hmac(skc->sc_hmac,
356 &skc->sc_hmac_key, message_count, messages,
357 iov_count, iovs, token, gss_context->hash_func);
/* Recompute the HMAC over the given buffers and compare it against the
 * received @token. Returns GSS_S_DEFECTIVE_TOKEN if the token is shorter
 * than the digest, GSS_S_FAILURE on mismatch or allocation failure.
 * NOTE(review): goto labels and the success-path assignment of rc appear
 * to be missing from this view of the file. */
361 u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
362 int message_count, rawobj_t *messages,
363 int iov_count, struct bio_vec *iovs,
364 rawobj_t *token, digest_hash hash_func)
366 rawobj_t checksum = RAWOBJ_EMPTY;
367 __u32 rc = GSS_S_FAILURE;
369 checksum.len = cfs_crypto_hash_digestsize(algo);
370 if (token->len < checksum.len) {
371 CDEBUG(D_SEC, "Token received too short, expected %d "
372 "received %d\n", token->len, checksum.len);
373 return GSS_S_DEFECTIVE_TOKEN;
376 OBD_ALLOC_LARGE(checksum.data, checksum.len);
380 if (sk_make_hmac(algo, key, message_count,
381 messages, iov_count, iovs, &checksum,
383 CDEBUG(D_SEC, "Failed to create checksum to validate\n");
/* NOTE(review): plain memcmp is not constant-time; acceptable only if a
 * timing side channel on MAC comparison is out of scope here -- confirm. */
387 if (memcmp(token->data, checksum.data, checksum.len)) {
388 CERROR("checksum mismatch\n");
396 OBD_FREE(checksum.data, checksum.len);
400 /* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
401 * encrypted pages in the bulk descriptor are populated although we only need
402 * to decrypt up to the number of bytes actually specified from the sender
403 * (bd_nob) otherwise the calculated HMAC will be incorrect. */
/* Verify the HMAC over a bulk descriptor: hash the rawobj messages first,
 * then the encrypted pages, but only up to @iov_bytes (the sender's actual
 * byte count) since trailing page padding is not covered by the HMAC.
 * NOTE(review): declarations (sg_table, rc, i, bytes), error gotos and the
 * final rc handling appear to be missing from this view of the file. */
405 u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
406 int msgcnt, rawobj_t *msgs, int iovcnt,
407 struct bio_vec *iovs, int iov_bytes, rawobj_t *token)
409 rawobj_t checksum = RAWOBJ_EMPTY;
410 struct ahash_request *req;
411 struct scatterlist sg[1];
417 checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
418 if (token->len < checksum.len) {
419 CDEBUG(D_SEC, "Token received too short, expected %d "
420 "received %d\n", token->len, checksum.len);
421 return GSS_S_DEFECTIVE_TOKEN;
424 OBD_ALLOC_LARGE(checksum.data, checksum.len);
426 return GSS_S_FAILURE;
428 req = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
/* Hash the plain rawobj buffers (e.g. the ciphertext header objects). */
434 for (i = 0; i < msgcnt; i++) {
438 rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
442 ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
443 rc = crypto_ahash_update(req);
445 gss_teardown_sgtable(&sgt);
449 gss_teardown_sgtable(&sgt);
/* Hash the bulk pages, capping at iov_bytes actually sent. */
452 for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
453 if (iovs[i].bv_len == 0)
456 bytes = min_t(int, iov_bytes, iovs[i].bv_len);
459 sg_init_table(sg, 1);
460 sg_set_page(&sg[0], iovs[i].bv_page, bytes,
462 ahash_request_set_crypt(req, sg, NULL, bytes);
463 rc = crypto_ahash_update(req);
469 cfs_crypto_hash_final(req, checksum.data, &checksum.len);
473 if (memcmp(token->data, checksum.data, checksum.len))
479 OBD_FREE_LARGE(checksum.data, checksum.len);
/* GSS mech entry: verify a MIC token against the given message buffers.
 * NOTE(review): some parameter lines (message_count, messages, token) are
 * missing from this view of the file. */
485 __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
489 struct bio_vec *iovs,
492 struct sk_ctx *skc = gss_context->internal_ctx_id;
494 return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
495 message_count, messages, iov_count, iovs, token,
496 gss_context->hash_func);
/* GSS mech entry: wrap (encrypt + integrity-protect) @message into @token.
 * Token layout (see sk_wire comment above):
 *   | struct sk_hdr | CTR ciphertext | HMAC(sk_hdr, gss_header, ciphertext) |
 * The IV is RFC 3686-style: host random nonce + per-message counter.
 * NOTE(review): declarations (skw, skh, msgbufs[3], token param) appear to
 * be missing from this view of the file. */
500 __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
501 rawobj_t *message, int message_buffer_length,
504 struct sk_ctx *skc = gss_context->internal_ctx_id;
505 size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
509 __u8 local_iv[SK_IV_SIZE];
510 unsigned int blocksize;
512 LASSERT(skc->sc_session_kb.kb_tfm);
/* Pad the plaintext up to the cipher block size before encrypting. */
514 blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
515 if (gss_add_padding(message, message_buffer_length, blocksize))
516 return GSS_S_FAILURE;
518 memset(token->data, 0, token->len);
520 if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
521 return GSS_S_FAILURE;
523 skw.skw_header.data = token->data;
524 skw.skw_header.len = sizeof(skh);
525 memcpy(skw.skw_header.data, &skh, sizeof(skh));
/* Encrypt with the host random as nonce (peer decrypts with it as peer). */
527 sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
528 skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
529 skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
530 if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
532 return GSS_S_FAILURE;
534 /* HMAC covers the SK header, GSS header, and ciphertext */
535 msgbufs[0] = skw.skw_header;
536 msgbufs[1] = *gss_header;
537 msgbufs[2] = skw.skw_cipher;
539 skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
540 skw.skw_hmac.len = sht_bytes;
541 if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key,
542 3, msgbufs, 0, NULL, &skw.skw_hmac,
543 gss_context->hash_func))
544 return GSS_S_FAILURE;
546 token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
548 return GSS_S_COMPLETE;
/* GSS mech entry: unwrap @token into plaintext @message.
 * Parses the sk_hdr, checks the ciphertext length is a whole number of
 * cipher blocks, verifies the HMAC over (sk_hdr, gss_header, ciphertext)
 * BEFORE decrypting, then decrypts using the peer's random as nonce.
 * NOTE(review): declarations (skw, skh pointer, msgbufs, rc) appear to be
 * missing from this view of the file. */
552 __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
553 rawobj_t *token, rawobj_t *message)
555 struct sk_ctx *skc = gss_context->internal_ctx_id;
556 size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
560 __u8 local_iv[SK_IV_SIZE];
561 unsigned int blocksize;
564 LASSERT(skc->sc_session_kb.kb_tfm);
/* Reject tokens too small to hold a header plus an HMAC. */
566 if (token->len < sizeof(skh) + sht_bytes)
567 return GSS_S_DEFECTIVE_TOKEN;
569 skw.skw_header.data = token->data;
570 skw.skw_header.len = sizeof(struct sk_hdr);
571 skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
572 skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
573 skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
574 skw.skw_hmac.len = sht_bytes;
576 blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
577 if (skw.skw_cipher.len % blocksize != 0)
578 return GSS_S_DEFECTIVE_TOKEN;
580 skh = (struct sk_hdr *)skw.skw_header.data;
581 rc = sk_verify_header(skh);
582 if (rc != GSS_S_COMPLETE)
585 /* HMAC covers the SK header, GSS header, and ciphertext */
586 msgbufs[0] = skw.skw_header;
587 msgbufs[1] = *gss_header;
588 msgbufs[2] = skw.skw_cipher;
589 rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
590 0, NULL, &skw.skw_hmac, gss_context->hash_func);
/* Decrypt only after the MAC checks out (encrypt-then-MAC order). */
594 sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
595 message->len = skw.skw_cipher.len;
596 if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
597 1, &skw.skw_cipher, message, 0))
598 return GSS_S_FAILURE;
600 return GSS_S_COMPLETE;
/* GSS mech entry: size the encrypted bio_vecs of a bulk descriptor so each
 * encrypted segment is the block-size-rounded length of its plaintext
 * counterpart, at the same page offset. */
604 __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
605 struct ptlrpc_bulk_desc *desc)
607 struct sk_ctx *skc = gss_context->internal_ctx_id;
611 LASSERT(skc->sc_session_kb.kb_tfm);
612 blocksize = crypto_sync_skcipher_blocksize(skc->sc_session_kb.kb_tfm);
614 for (i = 0; i < desc->bd_iov_count; i++) {
/* NOTE(review): `offset & blocksize` tests only the single bit equal to
 * blocksize; an alignment check is conventionally `offset & (blocksize - 1)`
 * -- confirm whether this is intentional or a latent bug. */
615 if (desc->bd_vec[i].bv_offset & blocksize) {
616 CERROR("offset %d not blocksize aligned\n",
617 desc->bd_vec[i].bv_offset);
618 return GSS_S_FAILURE;
621 desc->bd_enc_vec[i].bv_offset =
622 desc->bd_vec[i].bv_offset;
623 desc->bd_enc_vec[i].bv_len =
624 sk_block_mask(desc->bd_vec[i].bv_len, blocksize);
627 return GSS_S_COMPLETE;
/* Encrypt every page of a bulk descriptor in place into the bd_enc_vec
 * pages, rounding each segment up to the cipher block size. The CTR IV is
 * carried across segments so the whole bulk is one keystream.
 * NOTE(review): declarations (blocksize, i, rc), the sg_assign_page calls
 * and the cipher/adj_nob handling appear to be missing from this view. */
630 static __u32 sk_encrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv,
631 struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
634 struct scatterlist ptxt;
635 struct scatterlist ctxt;
640 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
642 blocksize = crypto_sync_skcipher_blocksize(tfm);
644 sg_init_table(&ptxt, 1);
645 sg_init_table(&ctxt, 1);
647 skcipher_request_set_sync_tfm(req, tfm);
648 skcipher_request_set_callback(req, 0, NULL, NULL);
650 for (i = 0; i < desc->bd_iov_count; i++) {
651 sg_set_page(&ptxt, desc->bd_vec[i].bv_page,
652 sk_block_mask(desc->bd_vec[i].bv_len,
654 desc->bd_vec[i].bv_offset);
657 sg_set_page(&ctxt, desc->bd_enc_vec[i].bv_page,
658 ptxt.length, ptxt.offset);
/* Publish the actual encrypted geometry back into the descriptor. */
660 desc->bd_enc_vec[i].bv_offset = ctxt.offset;
661 desc->bd_enc_vec[i].bv_len = ctxt.length;
663 skcipher_request_set_crypt(req, &ptxt, &ctxt, ptxt.length, iv);
664 rc = crypto_skcipher_encrypt_iv(req, &ctxt, &ptxt, ptxt.length);
666 CERROR("failed to encrypt page: %d\n", rc);
667 skcipher_request_zero(req);
671 skcipher_request_zero(req);
/* Decrypt the encrypted pages of a bulk descriptor back into the plain
 * bd_vec pages. Tracks cnob (ciphertext bytes consumed) and pnob
 * (plaintext bytes produced) and clamps each segment so neither exceeds
 * bd_nob_transferred / bd_nob respectively. When a plaintext segment is
 * not a whole number of blocks, it decrypts in place in the ciphertext
 * page and memcpy's the result out afterwards.
 * NOTE(review): declarations (blocksize, i, rc, cnob, pnob), some sg setup
 * lines and parts of the clamping assignments are missing from this view. */
679 static __u32 sk_decrypt_bulk(struct crypto_sync_skcipher *tfm, __u8 *iv,
680 struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
683 struct scatterlist ptxt;
684 struct scatterlist ctxt;
690 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
692 sg_init_table(&ptxt, 1);
693 sg_init_table(&ctxt, 1);
695 blocksize = crypto_sync_skcipher_blocksize(tfm);
696 if (desc->bd_nob_transferred % blocksize != 0) {
697 CERROR("Transfer not a multiple of block size: %d\n",
698 desc->bd_nob_transferred);
699 return GSS_S_DEFECTIVE_TOKEN;
702 skcipher_request_set_sync_tfm(req, tfm);
703 skcipher_request_set_callback(req, 0, NULL, NULL);
705 for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
707 struct bio_vec *piov = &desc->bd_vec[i];
708 struct bio_vec *ciov = &desc->bd_enc_vec[i];
/* Each ciphertext segment must be block-aligned in offset and length. */
710 if (ciov->bv_offset % blocksize != 0 ||
711 ciov->bv_len % blocksize != 0) {
712 CERROR("Invalid bulk descriptor vector\n");
713 skcipher_request_zero(req);
714 return GSS_S_DEFECTIVE_TOKEN;
717 /* Must adjust bytes here because we know the actual sizes after
718 * decryption. Similar to what gss_cli_ctx_unwrap_bulk does for
719 * integrity only mode */
721 /* cipher text must not exceed transferred size */
722 if (ciov->bv_len + cnob > desc->bd_nob_transferred)
724 desc->bd_nob_transferred - cnob;
726 piov->bv_len = ciov->bv_len;
728 /* plain text must not exceed bulk's size */
729 if (ciov->bv_len + pnob > desc->bd_nob)
730 piov->bv_len = desc->bd_nob - pnob;
732 /* Taken from krb5_decrypt since it was not verified
733 * whether or not LNET guarantees these */
734 if (ciov->bv_len + cnob > desc->bd_nob_transferred ||
735 piov->bv_len > ciov->bv_len) {
736 CERROR("Invalid decrypted length\n");
737 skcipher_request_zero(req);
738 return GSS_S_FAILURE;
742 if (ciov->bv_len == 0)
745 sg_init_table(&ctxt, 1);
746 sg_set_page(&ctxt, ciov->bv_page, ciov->bv_len,
750 /* In the event the plain text size is not a multiple
751 * of blocksize we decrypt in place and copy the result
752 * after the decryption */
753 if (piov->bv_len % blocksize == 0)
754 sg_assign_page(&ptxt, piov->bv_page);
756 skcipher_request_set_crypt(req, &ctxt, &ptxt, ptxt.length, iv);
757 rc = crypto_skcipher_decrypt_iv(req, &ptxt, &ctxt, ptxt.length);
759 CERROR("Decryption failed for page: %d\n", rc);
760 skcipher_request_zero(req);
761 return GSS_S_FAILURE;
/* Partial-block segment: copy the in-place decrypted bytes out. */
764 if (piov->bv_len % blocksize != 0) {
765 memcpy(page_address(piov->bv_page) +
767 page_address(ciov->bv_page) +
772 cnob += ciov->bv_len;
773 pnob += piov->bv_len;
775 skcipher_request_zero(req);
777 /* if needed, clear up the rest unused iovs */
779 while (i < desc->bd_iov_count)
780 desc->bd_vec[i++].bv_len = 0;
782 if (unlikely(cnob != desc->bd_nob_transferred)) {
783 CERROR("%d cipher text transferred but only %d decrypted\n",
784 desc->bd_nob_transferred, cnob);
785 return GSS_S_FAILURE;
788 if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
789 CERROR("%d plain text expected but only %d received\n",
791 return GSS_S_FAILURE;
/* GSS mech entry: wrap a bulk descriptor. Fills an sk_hdr into @token,
 * encrypts the bulk pages with sk_encrypt_bulk(), then appends an HMAC
 * over the cipher rawobj plus the encrypted bio_vecs.
 * NOTE(review): declarations (skw, skh, adj_nob param) appear to be
 * missing from this view of the file. */
798 __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
799 struct ptlrpc_bulk_desc *desc, rawobj_t *token,
802 struct sk_ctx *skc = gss_context->internal_ctx_id;
803 size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
806 __u8 local_iv[SK_IV_SIZE];
808 LASSERT(skc->sc_session_kb.kb_tfm);
810 memset(token->data, 0, token->len);
811 if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
812 return GSS_S_FAILURE;
814 skw.skw_header.data = token->data;
815 skw.skw_header.len = sizeof(skh);
816 memcpy(skw.skw_header.data, &skh, sizeof(skh));
818 sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
819 skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
820 skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
821 if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
822 desc, &skw.skw_cipher, adj_nob))
823 return GSS_S_FAILURE;
825 skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
826 skw.skw_hmac.len = sht_bytes;
827 if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
828 desc->bd_iov_count, desc->bd_enc_vec, &skw.skw_hmac,
829 gss_context->hash_func))
830 return GSS_S_FAILURE;
832 return GSS_S_COMPLETE;
/* GSS mech entry: unwrap a bulk descriptor. Verifies the sk_hdr and the
 * bulk HMAC (via sk_verify_bulk_hmac, bounded by bd_nob) before
 * decrypting the pages with sk_decrypt_bulk().
 * NOTE(review): declarations (skw, skh pointer, rc) and the error-return
 * lines after the rc checks appear to be missing from this view. */
836 __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
837 struct ptlrpc_bulk_desc *desc,
838 rawobj_t *token, int adj_nob)
840 struct sk_ctx *skc = gss_context->internal_ctx_id;
841 size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
844 __u8 local_iv[SK_IV_SIZE];
847 LASSERT(skc->sc_session_kb.kb_tfm);
849 if (token->len < sizeof(skh) + sht_bytes)
850 return GSS_S_DEFECTIVE_TOKEN;
852 skw.skw_header.data = token->data;
853 skw.skw_header.len = sizeof(struct sk_hdr);
854 skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
855 skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
856 skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
857 skw.skw_hmac.len = sht_bytes;
859 skh = (struct sk_hdr *)skw.skw_header.data;
860 rc = sk_verify_header(skh);
861 if (rc != GSS_S_COMPLETE)
864 rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
865 &skw.skw_cipher, desc->bd_iov_count,
866 desc->bd_enc_vec, desc->bd_nob,
/* Decrypt only after the MAC checks out (encrypt-then-MAC order). */
871 sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
872 rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
873 desc, &skw.skw_cipher, adj_nob);
877 return GSS_S_COMPLETE;
/* GSS mech entry: destroy the shared-key context attached to a GSS ctx. */
void gss_delete_sec_context_sk(void *internal_context)
{
	sk_delete_context((struct sk_ctx *)internal_context);
}
/* GSS mech entry: write this mechanism's short name ("sk") into @buf for
 * display; returns the number of characters written. The context argument
 * is unused -- the name is the same for every SK context. */
int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
	return scnprintf(buf, bufsize, "sk");
}
/* Operations vector wiring the shared-key mechanism into the GSS layer.
 * NOTE(review): the closing `};` appears to be missing from this view. */
892 static struct gss_api_ops gss_sk_ops = {
893 .gss_import_sec_context = gss_import_sec_context_sk,
894 .gss_copy_reverse_context = gss_copy_reverse_context_sk,
895 .gss_inquire_context = gss_inquire_context_sk,
896 .gss_get_mic = gss_get_mic_sk,
897 .gss_verify_mic = gss_verify_mic_sk,
898 .gss_wrap = gss_wrap_sk,
899 .gss_unwrap = gss_unwrap_sk,
900 .gss_prep_bulk = gss_prep_bulk_sk,
901 .gss_wrap_bulk = gss_wrap_bulk_sk,
902 .gss_unwrap_bulk = gss_unwrap_bulk_sk,
903 .gss_delete_sec_context = gss_delete_sec_context_sk,
904 .gss_display = gss_display_sk,
/* Subflavor table: skn (null), ska (auth), ski (integrity), skpi (privacy).
 * NOTE(review): the per-entry braces, qop and name fields appear to be
 * missing from this view of the file. */
907 static struct subflavor_desc gss_sk_sfs[] = {
909 .sf_subflavor = SPTLRPC_SUBFLVR_SKN,
911 .sf_service = SPTLRPC_SVC_NULL,
915 .sf_subflavor = SPTLRPC_SUBFLVR_SKA,
917 .sf_service = SPTLRPC_SVC_AUTH,
921 .sf_subflavor = SPTLRPC_SUBFLVR_SKI,
923 .sf_service = SPTLRPC_SVC_INTG,
927 .sf_subflavor = SPTLRPC_SUBFLVR_SKPI,
929 .sf_service = SPTLRPC_SVC_PRIV,
/* Mechanism descriptor registered with lgss: OID, ops vector and
 * subflavor table for the shared-key mechanism.
 * NOTE(review): name, OID length and sf-count fields appear to be missing
 * from this view of the file. */
934 static struct gss_api_mech gss_sk_mech = {
935 /* .gm_owner uses default NULL value for THIS_MODULE */
937 .gm_oid = (rawobj_t) {
939 .data = "\053\006\001\004\001\311\146\215\126\001\000\001",
941 .gm_ops = &gss_sk_ops,
943 .gm_sfs = gss_sk_sfs,
/* Module init: register the shared-key GSS mechanism with the lgss layer.
 * NOTE(review): the `status` declaration, failure branch and return appear
 * to be missing from this view of the file. */
946 int __init init_sk_module(void)
950 status = lgss_mech_register(&gss_sk_mech);
952 CERROR("Failed to register sk gss mechanism!\n");
/* Module teardown: unregister the shared-key GSS mechanism. */
957 void cleanup_sk_module(void)
959 lgss_mech_unregister(&gss_sk_mech);