/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (C) 2013, 2015, Trustees of Indiana University
 *
 * Copyright (c) 2014, Intel Corporation.
 *
 * Author: Jeremy Filizetti <jfilizet@iu.edu>
 * Author: Andrew Korty <ajk@iu.edu>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_user.h>

#include "gss_err.h"
#include "gss_crypto.h"
#include "gss_internal.h"
#include "gss_api.h"

#define SK_INTERFACE_VERSION 1
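
/*
 * Per-connection shared-key security context.  sc_hmac and sc_crypt
 * index into sk_hmac_types[] and sk_crypt_types[] below; sc_iv and the
 * session keyblock are only populated in privacy (skpi) mode.
 */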
struct sk_ctx {
        __u16                   sc_hmac;
        __u16                   sc_crypt;
        __u32                   sc_expire;
        rawobj_t                sc_shared_key;
        rawobj_t                sc_iv;
        struct gss_keyblock     sc_session_kb;
};

static struct sk_crypt_type sk_crypt_types[] = {
        [SK_CRYPT_AES_CTR] = {
                .sct_name = "ctr(aes)",
        },
};

static struct sk_hmac_type sk_hmac_types[] = {
        [SK_HMAC_SHA256] = {
                .sht_name = "hmac(sha256)",
                .sht_bytes = 32,
        },
        [SK_HMAC_SHA512] = {
                .sht_name = "hmac(sha512)",
                .sht_bytes = 64,
        },
};
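
/*
 * sk_block_mask() rounds len up to the next multiple of blocksize,
 * which must be a power of two.  For example, len = 10 with
 * blocksize = 16 gives (10 + 15) & ~15 = 16.
 */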
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
        return (len + blocksize - 1) & (~(blocksize - 1));
}

static int sk_init_keys(struct sk_ctx *skc)
{
        unsigned int ivsize;
        int rc;

        rc = gss_keyblock_init(&skc->sc_session_kb,
                               sk_crypt_types[skc->sc_crypt].sct_name, 0);
        if (rc)
                return rc;
        ivsize = crypto_blkcipher_ivsize(skc->sc_session_kb.kb_tfm);
        if (skc->sc_iv.len != ivsize) {
                CERROR("IV size for algorithm (%d) does not match provided IV size: %d\n",
                       ivsize, skc->sc_iv.len);
                return -EINVAL;
        }
        crypto_blkcipher_set_iv(skc->sc_session_kb.kb_tfm,
                                skc->sc_iv.data, skc->sc_iv.len);
        return 0;
}
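
/*
 * The context buffer produced by userspace (see sk_serialize_kctx())
 * is consumed below in order: interface version, HMAC algorithm,
 * crypt algorithm, expiration offset, shared key, IV, and session key.
 * The IV and session key may be zero length when privacy mode is not
 * in use.
 */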
static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
        char *ptr = inbuf->data;
        char *end = inbuf->data + inbuf->len;
        __u32 tmp;

        /* see sk_serialize_kctx() for format from userspace side */
        /* 1. Interface version */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read shared key interface version\n");
                return -1;
        }
        if (tmp != SK_INTERFACE_VERSION) {
                CERROR("Invalid shared key interface version: %d\n", tmp);
                return -1;
        }
        /* 2. HMAC type */
        if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
                CERROR("Failed to read HMAC algorithm type\n");
                return -1;
        }
        if (skc->sc_hmac >= SK_HMAC_MAX) {
                CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
                return -1;
        }
        /* 3. crypt type */
        if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
                CERROR("Failed to read crypt algorithm type\n");
                return -1;
        }
        if (skc->sc_crypt >= SK_CRYPT_MAX) {
                CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
                return -1;
        }
        /* 4. expiration time */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
                CERROR("Failed to read context expiration time\n");
                return -1;
        }
        skc->sc_expire = tmp + cfs_time_current_sec();
        /* 5. Shared key */
        if (gss_get_rawobj(&ptr, end, &skc->sc_shared_key)) {
                CERROR("Failed to read shared key\n");
                return -1;
        }
        if (skc->sc_shared_key.len <= SK_MIN_SIZE) {
                CERROR("Shared key must be larger than %d bytes\n",
                       SK_MIN_SIZE);
                return -1;
        }
        /* 6. IV, can be empty if not using privacy mode */
        if (gss_get_rawobj(&ptr, end, &skc->sc_iv)) {
                CERROR("Failed to read initialization vector\n");
                return -1;
        }
        /* 7. Session key, can be empty if not using privacy mode */
        if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
                CERROR("Failed to read session key\n");
                return -1;
        }

        return 0;
}

static void delete_sk_context(struct sk_ctx *skc)
{
        if (!skc)
                return;

        gss_keyblock_free(&skc->sc_session_kb);
        rawobj_free(&skc->sc_iv);
        rawobj_free(&skc->sc_shared_key);
}

__u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
{
        struct sk_ctx *skc;
        bool privacy = false;

        if (inbuf == NULL || inbuf->data == NULL)
                return GSS_S_FAILURE;

        OBD_ALLOC_PTR(skc);
        if (!skc)
                return GSS_S_FAILURE;

        if (fill_sk_context(inbuf, skc))
                goto out_err;

        /* Only privacy mode needs to initialize keys */
        if (skc->sc_session_kb.kb_key.len > 0) {
                privacy = true;
                if (sk_init_keys(skc))
                        goto out_err;
        }

        gss_context->internal_ctx_id = skc;
        CDEBUG(D_SEC, "successfully imported sk%s context\n",
               privacy ? "pi" : "i");

        return GSS_S_COMPLETE;

out_err:
        delete_sk_context(skc);
        OBD_FREE_PTR(skc);
        return GSS_S_FAILURE;
}

__u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
                                  struct gss_ctx *gss_context_new)
{
        struct sk_ctx *skc_old = gss_context_old->internal_ctx_id;
        struct sk_ctx *skc_new;

        OBD_ALLOC_PTR(skc_new);
        if (!skc_new)
                return GSS_S_FAILURE;

        skc_new->sc_crypt = skc_old->sc_crypt;
        skc_new->sc_hmac = skc_old->sc_hmac;
        skc_new->sc_expire = skc_old->sc_expire;
        if (rawobj_dup(&skc_new->sc_shared_key, &skc_old->sc_shared_key))
                goto out_err;
        if (rawobj_dup(&skc_new->sc_iv, &skc_old->sc_iv))
                goto out_err;
        if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
                goto out_err;

        /* Only privacy mode needs to initialize keys */
        if (skc_new->sc_session_kb.kb_key.len > 0)
                if (sk_init_keys(skc_new))
                        goto out_err;

        gss_context_new->internal_ctx_id = skc_new;
        CDEBUG(D_SEC, "successfully copied reverse sk context\n");

        return GSS_S_COMPLETE;

out_err:
        delete_sk_context(skc_new);
        OBD_FREE_PTR(skc_new);
        return GSS_S_FAILURE;
}

__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
                             unsigned long *endtime)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        *endtime = skc->sc_expire;
        return GSS_S_COMPLETE;
}
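
/*
 * Compute an HMAC with the named algorithm over a set of discontiguous
 * buffers: msg_count flat buffers plus iov_count bulk pages.  The
 * digest is written into token, which must be at least digest size.
 */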
__u32 sk_make_checksum(char *alg_name, rawobj_t *key,
                       int msg_count, rawobj_t *msgs,
                       int iov_count, lnet_kiov_t *iovs,
                       rawobj_t *token)
{
        struct crypto_hash *tfm;
        __u32 rc;

        tfm = crypto_alloc_hash(alg_name, 0, 0);
        if (IS_ERR(tfm))
                return GSS_S_FAILURE;

        rc = GSS_S_FAILURE;
        LASSERT(token->len >= crypto_hash_digestsize(tfm));
        if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
                             token))
                rc = GSS_S_COMPLETE;

        crypto_free_hash(tfm);
        return rc;
}

__u32 gss_get_mic_sk(struct gss_ctx *gss_context,
                     int message_count,
                     rawobj_t *messages,
                     int iov_count,
                     lnet_kiov_t *iovs,
                     rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_make_checksum(sk_hmac_types[skc->sc_hmac].sht_name,
                                &skc->sc_shared_key, message_count, messages,
                                iov_count, iovs, token);
}

__u32 sk_verify_checksum(struct sk_hmac_type *sht,
                         rawobj_t *key,
                         int message_count,
                         rawobj_t *messages,
                         int iov_count,
                         lnet_kiov_t *iovs,
                         rawobj_t *token)
{
        rawobj_t checksum = RAWOBJ_EMPTY;
        __u32 rc = GSS_S_FAILURE;

        checksum.len = sht->sht_bytes;
        if (token->len < checksum.len) {
                CDEBUG(D_SEC, "Token received too short, expected %d "
                       "received %d\n", checksum.len, token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        OBD_ALLOC_LARGE(checksum.data, checksum.len);
        if (!checksum.data)
                return rc;

        if (sk_make_checksum(sht->sht_name, key, message_count,
                             messages, iov_count, iovs, &checksum)) {
                CDEBUG(D_SEC, "Failed to create checksum to validate\n");
                goto cleanup;
        }
        if (memcmp(token->data, checksum.data, checksum.len)) {
                CERROR("checksum mismatch\n");
                goto cleanup;
        }
        rc = GSS_S_COMPLETE;

cleanup:
        OBD_FREE(checksum.data, checksum.len);
        return rc;
}

__u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
                        int message_count,
                        rawobj_t *messages,
                        int iov_count,
                        lnet_kiov_t *iovs,
                        rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;

        return sk_verify_checksum(&sk_hmac_types[skc->sc_hmac],
                                  &skc->sc_shared_key, message_count, messages,
                                  iov_count, iovs, token);
}
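
/*
 * Wrap tokens carry the encrypted message followed by the HMAC, so
 * token->len must leave sht_bytes of room after the ciphertext:
 * [ ciphertext | HMAC(gss_header || ciphertext) ].
 */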
__u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                  rawobj_t *message, int message_buffer_length,
                  rawobj_t *token)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t msgbufs[2];
        rawobj_t cipher = RAWOBJ_EMPTY;
        rawobj_t checksum = RAWOBJ_EMPTY;
        unsigned int blocksize;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        if (gss_add_padding(message, message_buffer_length, blocksize))
                return GSS_S_FAILURE;

        /* Only encrypting the message data */
        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, message,
                              &cipher, 1))
                return GSS_S_FAILURE;

        /* Checksum covers the GSS header followed by the encrypted message */
        msgbufs[0].len = gss_header->len;
        msgbufs[0].data = gss_header->data;
        msgbufs[1].len = cipher.len;
        msgbufs[1].data = cipher.data;

        LASSERT(cipher.len + sht->sht_bytes <= token->len);
        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;
        if (sk_make_checksum(sht->sht_name, &skc->sc_shared_key, 2, msgbufs, 0,
                             NULL, &checksum))
                return GSS_S_FAILURE;

        token->len = cipher.len + checksum.len;

        return GSS_S_COMPLETE;
}
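
/*
 * gss_unwrap_sk() reverses gss_wrap_sk(): split the token into
 * ciphertext and HMAC, verify the HMAC over the GSS header and
 * ciphertext first, and only then decrypt into the caller's buffer.
 */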
__u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                    rawobj_t *token, rawobj_t *message)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t msgbufs[2];
        rawobj_t cipher;
        rawobj_t checksum;
        unsigned int blocksize;
        __u32 rc;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        if (token->len < sht->sht_bytes)
                return GSS_S_DEFECTIVE_TOKEN;

        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;
        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;

        if (cipher.len % blocksize != 0)
                return GSS_S_DEFECTIVE_TOKEN;

        /* Checksum covers the GSS header followed by the encrypted message */
        msgbufs[0].len = gss_header->len;
        msgbufs[0].data = gss_header->data;
        msgbufs[1].len = cipher.len;
        msgbufs[1].data = cipher.data;
        rc = sk_verify_checksum(sht, &skc->sc_shared_key, 2, msgbufs, 0, NULL,
                                &checksum);
        if (rc)
                return rc;

        message->len = cipher.len;
        if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, &cipher,
                              message, 0))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}
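
/*
 * Prepare the encrypted kiov array for a bulk transfer: each encrypted
 * fragment mirrors its plaintext offset but has its length rounded up
 * to a whole cipher block so the block cipher can operate on it
 * directly.
 */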
__u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        int blocksize;
        int i;

        LASSERT(skc->sc_session_kb.kb_tfm);
        blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (BD_GET_KIOV(desc, i).kiov_offset & (blocksize - 1)) {
                        CERROR("offset %d not blocksize aligned\n",
                               BD_GET_KIOV(desc, i).kiov_offset);
                        return GSS_S_FAILURE;
                }

                BD_GET_ENC_KIOV(desc, i).kiov_offset =
                        BD_GET_KIOV(desc, i).kiov_offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len =
                        sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
                                      blocksize);
        }

        return GSS_S_COMPLETE;
}
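
/*
 * Encrypt each bulk plaintext fragment into its encrypted counterpart
 * page.  Lengths are block-masked so partial trailing blocks are
 * encrypted whole; when adj_nob is set the descriptor's byte count is
 * updated to the padded total.
 */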
static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *cipher, int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = NULL,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int nob = 0;

        blocksize = crypto_blkcipher_blocksize(tfm);
        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        for (i = 0; i < desc->bd_iov_count; i++) {
                sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
                            sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
                                          blocksize),
                            BD_GET_KIOV(desc, i).kiov_offset);
                nob += ptxt.length;

                sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            ptxt.length, ptxt.offset);

                BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;

                rc = crypto_blkcipher_encrypt(&cdesc, &ctxt, &ptxt,
                                              ptxt.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return GSS_S_FAILURE;
                }
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *cipher, int adj_nob)
{
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
                .info = NULL,
                .flags = 0,
        };
        struct scatterlist ptxt;
        struct scatterlist ctxt;
        int blocksize;
        int i;
        int rc;
        int pnob = 0;
        int cnob = 0;

        sg_init_table(&ptxt, 1);
        sg_init_table(&ctxt, 1);

        blocksize = crypto_blkcipher_blocksize(tfm);
        if (desc->bd_nob_transferred % blocksize != 0) {
                CERROR("Transfer not a multiple of block size: %d\n",
                       desc->bd_nob_transferred);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
             i++) {
                lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
                lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);

                if (ciov->kiov_offset % blocksize != 0 ||
                    ciov->kiov_len % blocksize != 0) {
                        CERROR("Invalid bulk descriptor vector\n");
                        return GSS_S_DEFECTIVE_TOKEN;
                }

                /* Must adjust bytes here because we know the actual sizes
                 * after decryption.  Similar to what gss_cli_ctx_unwrap_bulk
                 * does for integrity only mode */
                if (adj_nob) {
                        /* cipher text must not exceed transferred size */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
                                ciov->kiov_len =
                                        desc->bd_nob_transferred - cnob;

                        piov->kiov_len = ciov->kiov_len;

                        /* plain text must not exceed bulk's size */
                        if (ciov->kiov_len + pnob > desc->bd_nob)
                                piov->kiov_len = desc->bd_nob - pnob;
                } else {
                        /* Taken from krb5_decrypt since it was not verified
                         * whether or not LNET guarantees these */
                        if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
                            piov->kiov_len > ciov->kiov_len) {
                                CERROR("Invalid decrypted length\n");
                                return GSS_S_FAILURE;
                        }
                }

                if (ciov->kiov_len == 0)
                        continue;

                sg_init_table(&ctxt, 1);
                sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
                            ciov->kiov_offset);
                ptxt = ctxt;

                /* In the event the plain text size is not a multiple
                 * of blocksize we decrypt in place and copy the result
                 * after the decryption */
                if (piov->kiov_len % blocksize == 0)
                        sg_assign_page(&ptxt, piov->kiov_page);

                rc = crypto_blkcipher_decrypt(&cdesc, &ptxt, &ctxt,
                                              ctxt.length);
                if (rc) {
                        CERROR("Decryption failed for page: %d\n", rc);
                        return GSS_S_FAILURE;
                }

                if (piov->kiov_len % blocksize != 0) {
                        memcpy(page_address(piov->kiov_page) +
                               piov->kiov_offset,
                               page_address(ciov->kiov_page) +
                               ciov->kiov_offset,
                               piov->kiov_len);
                }

                cnob += ciov->kiov_len;
                pnob += piov->kiov_len;
        }

        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        BD_GET_KIOV(desc, i++).kiov_len = 0;

        if (unlikely(cnob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, cnob);
                return GSS_S_FAILURE;
        }

        if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pnob);
                return GSS_S_FAILURE;
        }

        return 0;
}

__u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
                       struct ptlrpc_bulk_desc *desc, rawobj_t *token,
                       int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t cipher = RAWOBJ_EMPTY;
        rawobj_t checksum = RAWOBJ_EMPTY;

        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;
        if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob))
                return GSS_S_FAILURE;

        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;
        if (sk_make_checksum(sht->sht_name, &skc->sc_shared_key, 1, &cipher, 0,
                             NULL, &checksum))
                return GSS_S_FAILURE;

        return GSS_S_COMPLETE;
}

__u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
                         struct ptlrpc_bulk_desc *desc,
                         rawobj_t *token, int adj_nob)
{
        struct sk_ctx *skc = gss_context->internal_ctx_id;
        struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
        rawobj_t cipher = RAWOBJ_EMPTY;
        rawobj_t checksum = RAWOBJ_EMPTY;
        __u32 rc;

        cipher.data = token->data;
        cipher.len = token->len - sht->sht_bytes;
        checksum.data = token->data + cipher.len;
        checksum.len = sht->sht_bytes;

        rc = sk_verify_checksum(sht, &skc->sc_shared_key, 1, &cipher, 0, NULL,
                                &checksum);
        if (rc)
                return rc;

        rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher,
                             adj_nob);
        if (rc)
                return rc;

        return GSS_S_COMPLETE;
}

void gss_delete_sec_context_sk(void *internal_context)
{
        struct sk_ctx *sk_context = internal_context;

        delete_sk_context(sk_context);
        OBD_FREE_PTR(sk_context);
}

int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
{
        return snprintf(buf, bufsize, "sk");
}

static struct gss_api_ops gss_sk_ops = {
        .gss_import_sec_context   = gss_import_sec_context_sk,
        .gss_copy_reverse_context = gss_copy_reverse_context_sk,
        .gss_inquire_context      = gss_inquire_context_sk,
        .gss_get_mic              = gss_get_mic_sk,
        .gss_verify_mic           = gss_verify_mic_sk,
        .gss_wrap                 = gss_wrap_sk,
        .gss_unwrap               = gss_unwrap_sk,
        .gss_prep_bulk            = gss_prep_bulk_sk,
        .gss_wrap_bulk            = gss_wrap_bulk_sk,
        .gss_unwrap_bulk          = gss_unwrap_bulk_sk,
        .gss_delete_sec_context   = gss_delete_sec_context_sk,
        .gss_display              = gss_display_sk,
};

static struct subflavor_desc gss_sk_sfs[] = {
        {
                .sf_subflavor = SPTLRPC_SUBFLVR_SKI,
                .sf_qop = 0,
                .sf_service = SPTLRPC_SVC_INTG,
                .sf_name = "ski",
        },
        {
                .sf_subflavor = SPTLRPC_SUBFLVR_SKPI,
                .sf_qop = 0,
                .sf_service = SPTLRPC_SVC_PRIV,
                .sf_name = "skpi",
        },
};

/*
 * currently we leave module owner NULL
 */
static struct gss_api_mech gss_sk_mech = {
        .gm_owner = NULL, /*THIS_MODULE, */
        .gm_name = "sk",
        .gm_oid = (rawobj_t) {
                12,
                "\053\006\001\004\001\311\146\215\126\001\000\001",
        },
        .gm_ops = &gss_sk_ops,
        .gm_sf_num = 2,
        .gm_sfs = gss_sk_sfs,
};
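
/*
 * Module entry points: register and unregister the shared-key
 * mechanism with the Lustre GSS framework so the ski and skpi
 * flavors become selectable through sptlrpc.
 */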
int __init init_sk_module(void)
{
        int status;

        status = lgss_mech_register(&gss_sk_mech);
        if (status)
                CERROR("Failed to register sk gss mechanism!\n");

        return status;
}

void cleanup_sk_module(void)
{
        lgss_mech_unregister(&gss_sk_mech);
}