/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (C) 2013, 2015, Trustees of Indiana University
 *
 * Copyright (c) 2014, Intel Corporation.
 *
 * Author: Jeremy Filizetti <jfilizet@iu.edu>
 * Author: Andrew Korty <ajk@iu.edu>
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#include <crypto/ctr.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_user.h>

#include "gss_crypto.h"
#include "gss_internal.h"

#define SK_INTERFACE_VERSION 1
#define SK_MSG_VERSION 1
/* Starting number for reverse contexts. It is critical to security
 * that reverse contexts use a different range of numbers than regular
 * contexts because they are using the same key. Therefore the IV/nonce
 * combination must be unique for them. To accomplish this, reverse contexts
 * use the negative range of a 64-bit number and regular contexts use the
 * positive range. If the same IV/nonce combination were reused it would leak
 * information about the plaintext. */
#define SK_IV_REV_START (1ULL << 63)
	struct gss_keyblock	sc_session_kb;

} __attribute__((packed));
/* The format of SK wire data is similar to that of RFC3686 ESP Payload
 * (section 3) except instead of just an IV there is a struct sk_hdr.
 * ---------------------------------------------------------------------
 * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
 * --------------------------------------------------------------------- */
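/* Note: the skw_header/skw_cipher/skw_hmac rawobj triplet used by the wrap
 * and unwrap paths below points into a single token buffer laid out in
 * exactly this order: the fixed-size struct sk_hdr first, then the
 * ciphertext, then an HMAC of digest size for the negotiated algorithm. */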
static struct sk_crypt_type sk_crypt_types[] = {
	[SK_CRYPT_AES256_CTR] = {
		.sct_name = "ctr(aes)",

static struct sk_hmac_type sk_hmac_types[] = {

		.sht_name = "hmac(sha256)",

		.sht_name = "hmac(sha512)",
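/* Round len up to the next multiple of blocksize; the mask arithmetic below
 * assumes blocksize is a power of two (e.g. sk_block_mask(10, 16) == 16). */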
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	return (len + blocksize - 1) & (~(blocksize - 1));
}
static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
{
	__u64 tmp_iv;

	skh->skh_version = cpu_to_be64(SK_MSG_VERSION);

	/* Always use inc_return so the initial value itself is never used as
	 * an IV; the initial values (0 and SK_IV_REV_START) double as the
	 * wrap-around detectors checked below. */
	tmp_iv = atomic64_inc_return(&skc->sc_iv);
	skh->skh_iv = cpu_to_be64(tmp_iv);
	if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
		CERROR("Counter looped, connection must be reset to avoid "
		       "leaking plaintext information\n");
		return GSS_S_FAILURE;
	}

	return GSS_S_COMPLETE;
}
static int sk_verify_header(struct sk_hdr *skh)
{
	if (be64_to_cpu(skh->skh_version) != SK_MSG_VERSION)
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}
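/* Build an RFC 3686 counter block for AES-CTR: a 4-byte nonce (the host or
 * peer random exchanged at context setup), the 8-byte per-message IV taken
 * from the SK header, and a 4-byte big-endian block counter starting at 1. */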
void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
{
	__u32 ctr = cpu_to_be32(1);

	memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
	iv += CTR_RFC3686_IV_SIZE;
	memcpy(iv, &ctr, sizeof(ctr));
}
static int sk_init_keys(struct sk_ctx *skc)
{
	return gss_keyblock_init(&skc->sc_session_kb,
				 sk_crypt_types[skc->sc_crypt].sct_name, 0);
}
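/* Parse the context blob received from userspace. The serialized layout
 * (see sk_serialize_kctx() on the userspace side) is read in order below:
 * interface version, HMAC algorithm, crypt algorithm, expiration offset,
 * host random, peer random, HMAC key and, last, a session key that may be
 * empty when privacy mode is not in use. */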
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
	char *ptr = inbuf->data;
	char *end = inbuf->data + inbuf->len;

	/* see sk_serialize_kctx() for format from userspace side */

	if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
		CERROR("Failed to read shared key interface version\n");

	if (tmp != SK_INTERFACE_VERSION) {
		CERROR("Invalid shared key interface version: %d\n", tmp);

	if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
		CERROR("Failed to read HMAC algorithm type\n");

	if (skc->sc_hmac <= SK_HMAC_EMPTY || skc->sc_hmac >= SK_HMAC_MAX) {
		CERROR("Invalid hmac type: %d\n", skc->sc_hmac);

	if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
		CERROR("Failed to read crypt algorithm type\n");

	if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) {
		CERROR("Invalid crypt type: %d\n", skc->sc_crypt);

	/* 4. expiration time */
	if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
		CERROR("Failed to read context expiration time\n");

	skc->sc_expire = tmp + cfs_time_current_sec();

	/* 5. host random is used as nonce for encryption */
	if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
			  sizeof(skc->sc_host_random))) {
		CERROR("Failed to read host random\n");

	/* 6. peer random is used as nonce for decryption */
	if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
			  sizeof(skc->sc_peer_random))) {
		CERROR("Failed to read peer random\n");

	if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
		CERROR("Failed to read HMAC key\n");

	if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
		CERROR("HMAC key must be larger than %d bytes\n",

	/* 8. Session key, can be empty if not using privacy mode */
	if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
		CERROR("Failed to read session key\n");
static void sk_delete_context(struct sk_ctx *skc)

	rawobj_free(&skc->sc_hmac_key);
	gss_keyblock_free(&skc->sc_session_kb);
__u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)

	bool privacy = false;

	if (inbuf == NULL || inbuf->data == NULL)
		return GSS_S_FAILURE;

		return GSS_S_FAILURE;

	atomic64_set(&skc->sc_iv, 0);

	if (sk_fill_context(inbuf, skc))

	/* Only privacy mode needs to initialize keys */
	if (skc->sc_session_kb.kb_key.len > 0) {

		if (sk_init_keys(skc))

	gss_context->internal_ctx_id = skc;
	CDEBUG(D_SEC, "successfully imported sk%s context\n",
	       privacy ? "pi" : "i");

	return GSS_S_COMPLETE;

	sk_delete_context(skc);
	return GSS_S_FAILURE;
__u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
				  struct gss_ctx *gss_context_new)

	struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
	struct sk_ctx *skc_new;

	OBD_ALLOC_PTR(skc_new);

		return GSS_S_FAILURE;

	skc_new->sc_hmac = skc_old->sc_hmac;
	skc_new->sc_crypt = skc_old->sc_crypt;
	skc_new->sc_expire = skc_old->sc_expire;
	skc_new->sc_host_random = skc_old->sc_host_random;
	skc_new->sc_peer_random = skc_old->sc_peer_random;

	atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);

	if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))

	if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))

	/* Only privacy mode needs to initialize keys */
	if (skc_new->sc_session_kb.kb_key.len > 0)
		if (sk_init_keys(skc_new))

	gss_context_new->internal_ctx_id = skc_new;
	CDEBUG(D_SEC, "successfully copied reverse sk context\n");

	return GSS_S_COMPLETE;

	sk_delete_context(skc_new);
	return GSS_S_FAILURE;
__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
			     unsigned long *endtime)
{
	struct sk_ctx *skc = gss_context->internal_ctx_id;

	*endtime = skc->sc_expire;
	return GSS_S_COMPLETE;
}
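/* Compute an HMAC with the named algorithm, keyed by the shared HMAC key,
 * over the supplied buffers and kiovs and write the digest into token;
 * token must be at least the digest size of the algorithm. */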
__u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs,
		   int iov_count, lnet_kiov_t *iovs, rawobj_t *token)

	struct crypto_hash *tfm;

	tfm = crypto_alloc_hash(alg_name, 0, 0);

		return GSS_S_FAILURE;

	LASSERT(token->len >= crypto_hash_digestsize(tfm));
	if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,

	crypto_free_hash(tfm);
__u32 gss_get_mic_sk(struct gss_ctx *gss_context,

	struct sk_ctx *skc = gss_context->internal_ctx_id;

	return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name,
			    &skc->sc_hmac_key, message_count, messages,
			    iov_count, iovs, token);
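/* Verify a MIC token: recompute the HMAC over the same buffers with the
 * shared key and compare it against the digest carried in the token. */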
__u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count,
		     rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,

	rawobj_t checksum = RAWOBJ_EMPTY;
	__u32 rc = GSS_S_FAILURE;

	checksum.len = sht->sht_bytes;
	if (token->len < checksum.len) {
		CDEBUG(D_SEC, "Token received too short, expected %d "
		       "received %d\n", checksum.len, token->len);
		return GSS_S_DEFECTIVE_TOKEN;

	OBD_ALLOC_LARGE(checksum.data, checksum.len);

	if (sk_make_hmac(sht->sht_name, key, message_count, messages,
			 iov_count, iovs, &checksum)) {
		CDEBUG(D_SEC, "Failed to create checksum to validate\n");

	if (memcmp(token->data, checksum.data, checksum.len)) {
		CERROR("checksum mismatch\n");

	OBD_FREE_LARGE(checksum.data, checksum.len);
/* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
 * encrypted pages in the bulk descriptor are populated, but only the number
 * of bytes actually sent by the peer (bd_nob) must be included in the HMAC;
 * otherwise the calculated HMAC will be incorrect. */
__u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key,
			  int msgcnt, rawobj_t *msgs, int iovcnt,
			  lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)

	rawobj_t checksum = RAWOBJ_EMPTY;
	struct crypto_hash *tfm;
	struct hash_desc desc = {

	struct scatterlist sg[1];

	int rc = GSS_S_FAILURE;

	checksum.len = sht->sht_bytes;
	if (token->len < checksum.len) {
		CDEBUG(D_SEC, "Token received too short, expected %d "
		       "received %d\n", checksum.len, token->len);
		return GSS_S_DEFECTIVE_TOKEN;

	OBD_ALLOC_LARGE(checksum.data, checksum.len);

	tfm = crypto_alloc_hash(sht->sht_name, 0, 0);

	LASSERT(token->len >= crypto_hash_digestsize(tfm));

	rc = crypto_hash_setkey(tfm, key->data, key->len);

	rc = crypto_hash_init(&desc);

	for (i = 0; i < msgcnt; i++) {
		if (msgs[i].len == 0)

		rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);

		rc = crypto_hash_update(&desc, sg, msgs[i].len);

			gss_teardown_sgtable(&sgt);

		gss_teardown_sgtable(&sgt);

	for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
		if (iovs[i].kiov_len == 0)

		bytes = min_t(int, iov_bytes, iovs[i].kiov_len);

		sg_init_table(sg, 1);
		sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
			    iovs[i].kiov_offset);
		rc = crypto_hash_update(&desc, sg, bytes);

	crypto_hash_final(&desc, checksum.data);

	if (memcmp(token->data, checksum.data, checksum.len)) {

	crypto_free_hash(tfm);

	OBD_FREE_LARGE(checksum.data, checksum.len);
__u32 gss_verify_mic_sk(struct gss_ctx *gss_context,

	struct sk_ctx *skc = gss_context->internal_ctx_id;

	return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_hmac_key,
			      message_count, messages, iov_count, iovs, token);
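/* Wrap (privacy): pick a fresh IV via the SK header, CTR-encrypt the padded
 * message into the token, then append an HMAC computed over the SK header,
 * the GSS header and the ciphertext. The resulting token follows the wire
 * layout described above. */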
__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		  rawobj_t *message, int message_buffer_length,

	struct sk_ctx *skc = gss_context->internal_ctx_id;
	struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];

	__u8 local_iv[SK_IV_SIZE];
	unsigned int blocksize;

	LASSERT(skc->sc_session_kb.kb_tfm);

	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
	if (gss_add_padding(message, message_buffer_length, blocksize))
		return GSS_S_FAILURE;

	memset(token->data, 0, token->len);

	if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
		return GSS_S_FAILURE;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(skh);
	memcpy(skw.skw_header.data, &skh, sizeof(skh));

	sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
		return GSS_S_FAILURE;

	/* HMAC covers the SK header, GSS header, and ciphertext */
	msgbufs[0] = skw.skw_header;
	msgbufs[1] = *gss_header;
	msgbufs[2] = skw.skw_cipher;

	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht->sht_bytes;
	if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 3, msgbufs, 0,
			 NULL, &skw.skw_hmac))
		return GSS_S_FAILURE;

	token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;

	return GSS_S_COMPLETE;
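/* Unwrap (privacy): validate the SK header, verify the HMAC over the SK
 * header, GSS header and ciphertext, and only then CTR-decrypt the
 * ciphertext into the caller's message buffer. */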
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
		    rawobj_t *token, rawobj_t *message)

	struct sk_ctx *skc = gss_context->internal_ctx_id;
	struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];

	__u8 local_iv[SK_IV_SIZE];
	unsigned int blocksize;

	LASSERT(skc->sc_session_kb.kb_tfm);

	if (token->len < sizeof(skh) + sht->sht_bytes)
		return GSS_S_DEFECTIVE_TOKEN;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(struct sk_hdr);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht->sht_bytes;

	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
	if (skw.skw_cipher.len % blocksize != 0)
		return GSS_S_DEFECTIVE_TOKEN;

	skh = (struct sk_hdr *)skw.skw_header.data;
	rc = sk_verify_header(skh);
	if (rc != GSS_S_COMPLETE)

	/* HMAC covers the SK header, GSS header, and ciphertext */
	msgbufs[0] = skw.skw_header;
	msgbufs[1] = *gss_header;
	msgbufs[2] = skw.skw_cipher;
	rc = sk_verify_hmac(sht, &skc->sc_hmac_key, 3, msgbufs, 0, NULL,

	sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
	message->len = skw.skw_cipher.len;
	if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
			      1, &skw.skw_cipher, message, 0))
		return GSS_S_FAILURE;

	return GSS_S_COMPLETE;
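/* Prepare bulk descriptor pages for encryption: every source kiov must start
 * on a cipher-block boundary, and each encrypted kiov keeps the plaintext
 * offset but has its length rounded up to a whole number of blocks. */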
__u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
		       struct ptlrpc_bulk_desc *desc)

	struct sk_ctx *skc = gss_context->internal_ctx_id;

	LASSERT(skc->sc_session_kb.kb_tfm);
	blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
			CERROR("offset %d not blocksize aligned\n",
			       BD_GET_KIOV(desc, i).kiov_offset);
			return GSS_S_FAILURE;

		BD_GET_ENC_KIOV(desc, i).kiov_offset =
			BD_GET_KIOV(desc, i).kiov_offset;
		BD_GET_ENC_KIOV(desc, i).kiov_len =
			sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);

	return GSS_S_COMPLETE;
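/* Encrypt the bulk descriptor pages: each plaintext kiov is rounded up to a
 * whole number of cipher blocks and encrypted into the corresponding
 * encrypted kiov of the descriptor. */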
static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
			     struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,

	struct blkcipher_desc cdesc = {

	struct scatterlist ptxt;
	struct scatterlist ctxt;

	blocksize = crypto_blkcipher_blocksize(tfm);

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	for (i = 0; i < desc->bd_iov_count; i++) {
		sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
			    sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
			    BD_GET_KIOV(desc, i).kiov_offset);

		sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
			    ptxt.length, ptxt.offset);

		BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
		BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

		rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,

			CERROR("failed to encrypt page: %d\n", rc);
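/* Decrypt the bulk descriptor pages. Ciphertext segments must be whole
 * blocks; when the plaintext length of a page is not a multiple of the block
 * size the page is decrypted in place in the encrypted kiov and the valid
 * bytes are copied back into the plaintext page afterwards. */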
static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
			     struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,

	struct blkcipher_desc cdesc = {

	struct scatterlist ptxt;
	struct scatterlist ctxt;

	sg_init_table(&ptxt, 1);
	sg_init_table(&ctxt, 1);

	blocksize = crypto_blkcipher_blocksize(tfm);
	if (desc->bd_nob_transferred % blocksize != 0) {
		CERROR("Transfer not a multiple of block size: %d\n",
		       desc->bd_nob_transferred);
		return GSS_S_DEFECTIVE_TOKEN;

	for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
		lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
		lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

		if (ciov->kiov_offset % blocksize != 0 ||
		    ciov->kiov_len % blocksize != 0) {
			CERROR("Invalid bulk descriptor vector\n");
			return GSS_S_DEFECTIVE_TOKEN;

		/* Must adjust the lengths here because the actual sizes are
		 * only known from what was transferred. Similar to what
		 * gss_cli_ctx_unwrap_bulk() does for integrity-only mode. */

		/* cipher text must not exceed transferred size */
		if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
				desc->bd_nob_transferred - cnob;

		piov->kiov_len = ciov->kiov_len;

		/* plain text must not exceed bulk's size */
		if (ciov->kiov_len + pnob > desc->bd_nob)
			piov->kiov_len = desc->bd_nob - pnob;

		/* Taken from krb5_decrypt since it was not verified
		 * whether or not LNET guarantees these */
		if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
		    piov->kiov_len > ciov->kiov_len) {
			CERROR("Invalid decrypted length\n");
			return GSS_S_FAILURE;

		if (ciov->kiov_len == 0)

		sg_init_table(&ctxt, 1);
		sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,

		/* In the event the plain text size is not a multiple
		 * of blocksize we decrypt in place and copy the result
		 * after the decryption */
		if (piov->kiov_len % blocksize == 0)
			sg_assign_page(&ptxt, piov->kiov_page);

		rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,

			CERROR("Decryption failed for page: %d\n", rc);
			return GSS_S_FAILURE;

		if (piov->kiov_len % blocksize != 0) {
			memcpy(page_address(piov->kiov_page) +
			       page_address(ciov->kiov_page) +

		cnob += ciov->kiov_len;
		pnob += piov->kiov_len;

	/* if needed, clear out the remaining unused iovs */

	while (i < desc->bd_iov_count)
		BD_GET_KIOV(desc, i++).kiov_len = 0;

	if (unlikely(cnob != desc->bd_nob_transferred)) {
		CERROR("%d cipher text transferred but only %d decrypted\n",
		       desc->bd_nob_transferred, cnob);
		return GSS_S_FAILURE;

	if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
		CERROR("%d plain text expected but only %d received\n",
		return GSS_S_FAILURE;
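/* Wrap a bulk descriptor: write a fresh SK header into the token, encrypt
 * the bulk pages with an IV derived from that header, then HMAC the token's
 * ciphertext area together with the encrypted pages. */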
__u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
		       struct ptlrpc_bulk_desc *desc, rawobj_t *token,

	struct sk_ctx *skc = gss_context->internal_ctx_id;
	struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];

	__u8 local_iv[SK_IV_SIZE];

	LASSERT(skc->sc_session_kb.kb_tfm);

	memset(token->data, 0, token->len);
	if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
		return GSS_S_FAILURE;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(skh);
	memcpy(skw.skw_header.data, &skh, sizeof(skh));

	sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
	if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
			    desc, &skw.skw_cipher, adj_nob))
		return GSS_S_FAILURE;

	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht->sht_bytes;
	if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 1, &skw.skw_cipher,
			 desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
		return GSS_S_FAILURE;

	return GSS_S_COMPLETE;
__u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
			 struct ptlrpc_bulk_desc *desc,
			 rawobj_t *token, int adj_nob)

	struct sk_ctx *skc = gss_context->internal_ctx_id;
	struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];

	__u8 local_iv[SK_IV_SIZE];

	LASSERT(skc->sc_session_kb.kb_tfm);

	if (token->len < sizeof(skh) + sht->sht_bytes)
		return GSS_S_DEFECTIVE_TOKEN;

	skw.skw_header.data = token->data;
	skw.skw_header.len = sizeof(struct sk_hdr);
	skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
	skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
	skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
	skw.skw_hmac.len = sht->sht_bytes;

	skh = (struct sk_hdr *)skw.skw_header.data;
	rc = sk_verify_header(skh);
	if (rc != GSS_S_COMPLETE)

	rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac],
				 &skc->sc_hmac_key, 1, &skw.skw_cipher,
				 desc->bd_iov_count, GET_ENC_KIOV(desc),
				 desc->bd_nob, &skw.skw_hmac);

	sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
	rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
			     desc, &skw.skw_cipher, adj_nob);

	return GSS_S_COMPLETE;
void gss_delete_sec_context_sk(void *internal_context)
{
	struct sk_ctx *sk_context = internal_context;

	sk_delete_context(sk_context);
}

int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
	return snprintf(buf, bufsize, "sk");
}
static struct gss_api_ops gss_sk_ops = {
	.gss_import_sec_context = gss_import_sec_context_sk,
	.gss_copy_reverse_context = gss_copy_reverse_context_sk,
	.gss_inquire_context = gss_inquire_context_sk,
	.gss_get_mic = gss_get_mic_sk,
	.gss_verify_mic = gss_verify_mic_sk,
	.gss_wrap = gss_wrap_sk,
	.gss_unwrap = gss_unwrap_sk,
	.gss_prep_bulk = gss_prep_bulk_sk,
	.gss_wrap_bulk = gss_wrap_bulk_sk,
	.gss_unwrap_bulk = gss_unwrap_bulk_sk,
	.gss_delete_sec_context = gss_delete_sec_context_sk,
	.gss_display = gss_display_sk,
};
static struct subflavor_desc gss_sk_sfs[] = {

		.sf_subflavor = SPTLRPC_SUBFLVR_SKN,
		.sf_service = SPTLRPC_SVC_NULL,

		.sf_subflavor = SPTLRPC_SUBFLVR_SKA,
		.sf_service = SPTLRPC_SVC_AUTH,

		.sf_subflavor = SPTLRPC_SUBFLVR_SKI,
		.sf_service = SPTLRPC_SVC_INTG,

		.sf_subflavor = SPTLRPC_SUBFLVR_SKPI,
		.sf_service = SPTLRPC_SVC_PRIV,
 * currently we leave module owner NULL
 */
static struct gss_api_mech gss_sk_mech = {
	.gm_owner = NULL, /*THIS_MODULE, */

	.gm_oid = (rawobj_t) {

		"\053\006\001\004\001\311\146\215\126\001\000\001",

	.gm_ops = &gss_sk_ops,

	.gm_sfs = gss_sk_sfs,
int __init init_sk_module(void)

	status = lgss_mech_register(&gss_sk_mech);

		CERROR("Failed to register sk gss mechanism!\n");
void cleanup_sk_module(void)
{
	lgss_mech_unregister(&gss_sk_mech);
}