2 * Modifications for Lustre
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
6 * Copyright (c) 2011, 2014, Intel Corporation.
8 * Author: Eric Mei <ericm@clusterfs.com>
12 * linux/net/sunrpc/gss_krb5_mech.c
13 * linux/net/sunrpc/gss_krb5_crypto.c
14 * linux/net/sunrpc/gss_krb5_seal.c
15 * linux/net/sunrpc/gss_krb5_seqnum.c
16 * linux/net/sunrpc/gss_krb5_unseal.c
18 * Copyright (c) 2001 The Regents of the University of Michigan.
19 * All rights reserved.
21 * Andy Adamson <andros@umich.edu>
22 * J. Bruce Fields <bfields@umich.edu>
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. Neither the name of the University nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 #define DEBUG_SUBSYSTEM S_SEC
53 #include <libcfs/linux/linux-crypto.h>
55 #include <obd_support.h>
57 #include "gss_internal.h"
58 #include "gss_crypto.h"
/*
 * Allocate a block-cipher transform for @alg_name/@alg_mode and program
 * kb->kb_key into it.  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): this excerpt is missing interior lines (declarations,
 * braces, return paths) — comments below describe only the visible code.
 */
60 int gss_keyblock_init(struct gss_keyblock *kb, char *alg_name,
/* allocate the legacy-API blkcipher tfm for the requested algorithm */
65 	kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
66 	if (IS_ERR(kb->kb_tfm)) {
/* convert the ERR_PTR into a negative errno before reporting */
67 		rc = PTR_ERR(kb->kb_tfm);
69 		CERROR("failed to alloc tfm: %s, mode %d: rc = %d\n", alg_name,
/* load the raw key material held in kb->kb_key into the cipher */
74 	rc = crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data,
77 		CERROR("failed to set %s key, len %d, rc = %d\n", alg_name,
/*
 * Release the key bytes and the cipher transform owned by @kb.
 * NOTE(review): lines are missing from this excerpt — presumably a NULL
 * check guards crypto_free_blkcipher(); confirm against the full source.
 */
85 void gss_keyblock_free(struct gss_keyblock *kb)
/* free (and per rawobj_free semantics, presumably zero) the key object */
87 	rawobj_free(&kb->kb_key);
/* drop the transform allocated by gss_keyblock_init() */
89 	crypto_free_blkcipher(kb->kb_tfm);
/*
 * Duplicate only the raw key bytes of @kb into @new; the kb_tfm transform
 * is NOT copied here — the caller must (re)initialize it separately.
 * Returns the rawobj_dup() result (0 on success).
 */
92 int gss_keyblock_dup(struct gss_keyblock *new, struct gss_keyblock *kb)
94 	return rawobj_dup(&new->kb_key, &kb->kb_key);
/*
 * Bounds-checked extraction of @len bytes from the buffer cursor *@ptr
 * (advancing it on success) into @res, never reading past @end.
 * NOTE(review): the memcpy/advance lines are missing from this excerpt.
 */
97 int gss_get_bytes(char **ptr, const char *end, void *res, size_t len)
/* q is presumably p + len: reject reads past @end and pointer wraparound */
102 	if (q > end || q < p)
/*
 * Parse a length-prefixed byte string from *@ptr into @res: read a length
 * word, bounds-check it, allocate res->data and copy the payload.
 * NOTE(review): excerpt is gappy — the zero-length early-out and error
 * returns are only partially visible.
 */
109 int gss_get_rawobj(char **ptr, const char *end, rawobj_t *res)
/* first pull the on-wire length field */
115 	if (gss_get_bytes(&p, end, &len, sizeof(len)))
/* q is presumably p + len: reject payloads overrunning @end or wrapping */
119 	if (q > end || q < p)
122 	/* Support empty objects */
/* allocate the destination buffer for the payload */
124 	OBD_ALLOC_LARGE(res->data, len);
134 	memcpy(res->data, p, len);
/*
 * Extract exactly @keysize raw key bytes from the wire cursor *@ptr into a
 * freshly allocated buffer and hand ownership to @kb->kb_key.
 * NOTE(review): allocation-failure / return lines are missing from this
 * excerpt; on gss_get_bytes() failure the temporary buffer is freed.
 */
139 int gss_get_keyblock(char **ptr, const char *end,
140 		     struct gss_keyblock *kb, __u32 keysize)
/* temporary buffer that will become kb->kb_key.data on success */
145 	OBD_ALLOC_LARGE(buf, keysize);
149 	rc = gss_get_bytes(ptr, end, buf, keysize);
/* extraction failed: release the buffer, do not leak */
151 		OBD_FREE_LARGE(buf, keysize);
/* success: keyblock takes ownership of @buf */
155 	kb->kb_key.len = keysize;
156 	kb->kb_key.data = buf;
/*
 * Build a scatter/gather table over the (possibly vmalloc'd) buffer
 * @buf/@buf_len so it can be fed to the kernel crypto API.
 */
161  * Should be used for buffers allocated with k/vmalloc().
163  * Dispose of @sgt with gss_teardown_sgtable().
165  * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
166  * in cases where a single sg is sufficient. No attempt to reduce the
167  * number of sgs by squeezing physically contiguous pages together is
168  * made though, for simplicity.
170  * This function is copied from the ceph filesystem code.
172 int gss_setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
173 		      const void *buf, unsigned int buf_len)
175 	struct scatterlist *sg;
/* vmalloc memory is not physically contiguous: must map page by page */
176 	const bool is_vmalloc = is_vmalloc_addr(buf);
177 	unsigned int off = offset_in_page(buf);
178 	unsigned int chunk_cnt = 1;
/* total span rounded up to whole pages */
179 	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
184 	memset(sgt, 0, sizeof(*sgt));
/* vmalloc case (presumably — the guarding branch is missing from this
 * excerpt): one sg entry per page */
189 		chunk_cnt = chunk_len >> PAGE_SHIFT;
190 		chunk_len = PAGE_SIZE;
194 		rc = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
/* single-chunk case: use the caller-supplied sg, no allocation */
198 		WARN_ON_ONCE(chunk_cnt != 1);
199 		sg_init_table(prealloc_sg, 1);
200 		sgt->sgl = prealloc_sg;
201 		sgt->nents = sgt->orig_nents = 1;
/* walk the table, pointing each entry at the page backing @buf */
204 	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
/* bytes of this chunk actually covered by the buffer */
206 		unsigned int len = min(chunk_len - off, buf_len);
/* translate virtual address to struct page by allocation type */
209 			page = vmalloc_to_page(buf);
211 			page = virt_to_page(buf);
213 		sg_set_page(sg, page, len, off);
/* by the end every byte must have been mapped */
220 	WARN_ON_ONCE(buf_len != 0);
/*
 * Undo gss_setup_sgtable(): sg_free_table() is only needed when the table
 * was actually allocated (more than one entry); the single-entry case used
 * the caller's preallocated sg and owns nothing.
 */
225 void gss_teardown_sgtable(struct sg_table *sgt)
227 	if (sgt->orig_nents > 1)
/*
 * Encrypt or decrypt @length bytes from @in into @out with @tfm in a
 * single pass: @out first receives a copy of @in, then is transformed
 * in place through a scatterlist.  @iv may supply an initialization
 * vector (copied into a local 16-byte buffer, zeroed otherwise).
 * Returns 0 on success, negative on error.
 * NOTE(review): excerpt is missing lines (iv NULL-check branch, returns).
 */
231 int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
232 		      const void *in, void *out, size_t length)
234 	struct blkcipher_desc desc;
235 	struct scatterlist sg;
236 	struct sg_table sg_out;
/* zeroed IV used when the caller passes none */
237 	__u8 local_iv[16] = {0};
242 	desc.info = local_iv;
/* block ciphers require whole blocks: reject partial-block input */
245 	if (length % crypto_blkcipher_blocksize(tfm) != 0) {
246 		CERROR("output length %zu mismatch blocksize %d\n",
247 		       length, crypto_blkcipher_blocksize(tfm));
/* the local IV buffer is fixed at 16 bytes; refuse larger IVs */
251 	if (crypto_blkcipher_ivsize(tfm) > ARRAY_SIZE(local_iv)) {
252 		CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
/* caller-provided IV (presumably guarded by `if (iv)` in the full source) */
257 		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
/* copy plaintext/ciphertext into @out, then transform @out in place */
259 	memcpy(out, in, length);
261 	ret = gss_setup_sgtable(&sg_out, &sg, out, length);
266 		ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
268 		ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
270 	gss_teardown_sgtable(&sg_out);
/*
 * Compute a keyed HMAC over a set of in-memory buffers (@msgs), kernel
 * iovecs (@iovs), and an optional trailing header, writing the digest to
 * cksum->data.
 * NOTE(review): excerpt is missing lines (error checks, @key/@hdr/@cksum
 * parameter lines).  Also note the hdr update hashes sizeof(hdr->len)
 * here, while gss_digest_norm() hashes sizeof(*hdr) — looks intentional
 * per-mechanism behavior, but worth confirming against the full source.
 */
275 int gss_digest_hmac(struct crypto_hash *tfm,
278 		    int msgcnt, rawobj_t *msgs,
279 		    int iovcnt, lnet_kiov_t *iovs,
282 	struct hash_desc desc = {
286 	struct scatterlist sg[1];
/* program the HMAC key before any update */
291 	rc = crypto_hash_setkey(tfm, key->data, key->len);
295 	rc = crypto_hash_init(&desc);
/* fold each non-empty rawobj buffer into the digest */
299 	for (i = 0; i < msgcnt; i++) {
300 		if (msgs[i].len == 0)
303 		rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
306 		rc = crypto_hash_update(&desc, sg, msgs[i].len);
310 		gss_teardown_sgtable(&sgt);
/* fold each non-empty kiov page fragment into the digest */
313 	for (i = 0; i < iovcnt; i++) {
314 		if (iovs[i].kiov_len == 0)
317 		sg_init_table(sg, 1);
318 		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
319 			    iovs[i].kiov_offset);
320 		rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
/* optional header: only its len field is hashed here (cf. note above) */
326 		rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
329 		rc = crypto_hash_update(&desc, sg, sizeof(hdr->len));
333 		gss_teardown_sgtable(&sgt);
/* finalize the HMAC into the caller's checksum buffer */
336 	return crypto_hash_final(&desc, cksum->data);
/*
 * Compute a plain (unkeyed) hash over @msgs, @iovs and the optional
 * header, then encrypt the resulting digest with @kb->kb_tfm (a
 * hash-then-encrypt checksum, in contrast to gss_digest_hmac()).
 * NOTE(review): excerpt is missing lines (error checks, @hdr/@cksum
 * parameter lines, sgt declaration).
 */
339 int gss_digest_norm(struct crypto_hash *tfm,
340 		    struct gss_keyblock *kb,
342 		    int msgcnt, rawobj_t *msgs,
343 		    int iovcnt, lnet_kiov_t *iovs,
346 	struct hash_desc desc;
347 	struct scatterlist sg[1];
356 	rc = crypto_hash_init(&desc);
/* fold each non-empty rawobj buffer into the digest */
360 	for (i = 0; i < msgcnt; i++) {
361 		if (msgs[i].len == 0)
364 		rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
368 		rc = crypto_hash_update(&desc, sg, msgs[i].len);
372 		gss_teardown_sgtable(&sgt);
/* fold each non-empty kiov page fragment into the digest */
375 	for (i = 0; i < iovcnt; i++) {
376 		if (iovs[i].kiov_len == 0)
379 		sg_init_table(sg, 1);
380 		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
381 			    iovs[i].kiov_offset);
382 		rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
/* optional header: the whole struct is hashed here (cf. gss_digest_hmac) */
388 		rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
392 		rc = crypto_hash_update(&desc, sg, sizeof(*hdr));
396 		gss_teardown_sgtable(&sgt);
399 	rc = crypto_hash_final(&desc, cksum->data);
/* encrypt the digest in place (decrypt=0, no IV) to form the checksum */
403 	return gss_crypt_generic(kb->kb_tfm, 0, NULL, cksum->data,
404 				 cksum->data, cksum->len);
/*
 * Pad @msg up to a multiple of @blocksize (which must be a power of two,
 * given the mask arithmetic below).  Each padding byte is set to the pad
 * count, PKCS#7-style.  Fails if @msg_buflen cannot hold the padding.
 * NOTE(review): excerpt is missing lines (the second mask operand on the
 * padding computation, the len update, and the return statements).
 */
407 int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
/* bytes needed to reach the next blocksize boundary (0 if aligned) */
411 	padding = (blocksize - (msg->len & (blocksize - 1))) &
/* ensure the caller's buffer has room for the pad bytes */
416 	if (msg->len + padding > msg_buflen) {
417 		CERROR("bufsize %u too small: datalen %u, padding %u\n",
418 		       msg_buflen, msg->len, padding);
/* write `padding` bytes each holding the value `padding` */
422 	memset(msg->data + msg->len, padding, padding);
/*
 * Encrypt or decrypt a list of @inobj_cnt rawobjs back-to-back into the
 * single output buffer @outobj, updating outobj->len to the total bytes
 * produced.  When @iv is non-NULL the chained *_iv cipher calls are used;
 * otherwise the stateless encrypt/decrypt variants.
 * NOTE(review): excerpt is missing lines (the enc/dec flag parameter,
 * desc setup, the iv NULL-check branch, loop error checks, returns).
 */
427 int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
428 		      int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
431 	struct blkcipher_desc desc;
432 	struct scatterlist src;
433 	struct scatterlist dst;
434 	struct sg_table sg_dst;
435 	struct sg_table sg_src;
446 	for (i = 0; i < inobj_cnt; i++) {
/* each input must fit in the remaining output buffer */
447 		LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
/* map the current input object for the cipher */
449 		rc = gss_setup_sgtable(&sg_src, &src, inobjs[i].data,
/* map the remaining tail of the output buffer */
454 		rc = gss_setup_sgtable(&sg_dst, &dst, buf,
455 				       outobj->len - datalen);
/* dst setup failed: release the already-built src table */
457 			gss_teardown_sgtable(&sg_src);
/* IV-chaining path (presumably guarded by `if (iv)` in the full source) */
463 			rc = crypto_blkcipher_encrypt_iv(&desc, &dst,
467 			rc = crypto_blkcipher_decrypt_iv(&desc, &dst,
/* stateless path used when no IV was supplied */
472 			rc = crypto_blkcipher_encrypt(&desc, &dst, &src,
475 			rc = crypto_blkcipher_decrypt(&desc, &dst, &src,
479 		gss_teardown_sgtable(&sg_src);
480 		gss_teardown_sgtable(&sg_dst);
483 			CERROR("encrypt error %d\n", rc);
/* advance the running output cursor past this object's ciphertext */
487 		datalen += inobjs[i].len;
488 		buf += inobjs[i].len;
/* report total bytes produced */
491 	outobj->len = datalen;