2 * Modifications for Lustre
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
6 * Author: Eric Mei <ericm@clusterfs.com>
10 * linux/net/sunrpc/gss_krb5_mech.c
11 * linux/net/sunrpc/gss_krb5_crypto.c
12 * linux/net/sunrpc/gss_krb5_seal.c
13 * linux/net/sunrpc/gss_krb5_seqnum.c
14 * linux/net/sunrpc/gss_krb5_unseal.c
16 * Copyright (c) 2001 The Regents of the University of Michigan.
17 * All rights reserved.
19 * Andy Adamson <andros@umich.edu>
20 * J. Bruce Fields <bfields@umich.edu>
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. Neither the name of the University nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
36 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
37 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
38 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
40 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
41 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
42 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
43 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
44 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
45 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 #define DEBUG_SUBSYSTEM S_SEC
51 #include <libcfs/linux/linux-crypto.h>
53 #include <obd_support.h>
55 #include "gss_internal.h"
56 #include "gss_crypto.h"
/*
 * Initialize a GSS keyblock: allocate a synchronous skcipher transform
 * for @alg_name in @alg_mode and program it with the key bytes already
 * held in kb->kb_key.  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): this extract is missing several original lines
 * (local declarations, braces, gotos/returns) — do not treat the body
 * below as complete; confirm control flow against the full source.
 */
58 int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
/* Allocate the cipher transform; on error the pointer encodes errno. */
63 kb->kb_tfm = crypto_alloc_sync_skcipher(alg_name, alg_mode, 0);
64 if (IS_ERR(kb->kb_tfm)) {
65 rc = PTR_ERR(kb->kb_tfm);
67 CERROR("failed to alloc tfm: %s, mode %d: rc = %d\n", alg_name,
/* Bind the raw key material (kb_key) to the freshly allocated tfm. */
72 rc = crypto_sync_skcipher_setkey(kb->kb_tfm, kb->kb_key.data,
75 CERROR("failed to set %s key, len %d, rc = %d\n", alg_name,
/*
 * Release all resources held by a keyblock: the raw key bytes and the
 * skcipher transform allocated by gss_keyblock_init().
 *
 * NOTE(review): an omitted line probably guards kb->kb_tfm against NULL
 * before freeing — verify against the full source.
 */
83 void gss_keyblock_free(struct gss_keyblock *kb)
85 rawobj_free(&kb->kb_key);
87 crypto_free_sync_skcipher(kb->kb_tfm);
/*
 * Duplicate only the raw key material of @kb into @new; the cipher
 * transform (kb_tfm) is NOT copied here.  Returns the rawobj_dup()
 * result (0 on success, negative errno on allocation failure).
 */
90 int gss_keyblock_dup(struct gss_keyblock *new, struct gss_keyblock *kb)
92 return rawobj_dup(&new->kb_key, &kb->kb_key);
/*
 * Bounded copy of @len bytes out of a parse buffer.  *ptr is the
 * cursor, @end is one past the last valid byte; the q > end / q < p
 * check rejects both overrun and pointer-arithmetic wraparound.
 *
 * NOTE(review): lines computing p/q and advancing *ptr are omitted in
 * this extract — presumably p = *ptr, q = p + len; confirm upstream.
 */
95 int gss_get_bytes(char **ptr, const char *end, void *res, size_t len)
100 if (q > end || q < p)
/*
 * Parse a length-prefixed object from the buffer: first a length word
 * (via gss_get_bytes()), then that many payload bytes, copied into a
 * freshly allocated res->data.  Zero-length objects are accepted.
 *
 * NOTE(review): error paths and the *ptr advance are omitted from this
 * extract — confirm cleanup on OBD_ALLOC_LARGE failure in full source.
 */
107 int gss_get_rawobj(char **ptr, const char *end, rawobj_t *res)
113 if (gss_get_bytes(&p, end, &len, sizeof(len)))
/* Re-check bounds for the payload following the length word. */
117 if (q > end || q < p)
120 /* Support empty objects */
122 OBD_ALLOC_LARGE(res->data, len);
132 memcpy(res->data, p, len);
/*
 * Read @keysize raw key bytes from the parse buffer into a new
 * allocation and hand ownership to @kb (kb_key.data/kb_key.len).
 * On a short read the temporary buffer is freed and an error returned.
 *
 * NOTE(review): intermediate declarations and the final keyblock
 * initialization call are omitted in this extract.
 */
137 int gss_get_keyblock(char **ptr, const char *end,
138 struct gss_keyblock *kb, __u32 keysize)
143 OBD_ALLOC_LARGE(buf, keysize);
147 rc = gss_get_bytes(ptr, end, buf, keysize);
/* Parse failed: release the temporary buffer before bailing out. */
149 OBD_FREE_LARGE(buf, keysize);
/* Success: keyblock takes ownership of @buf. */
153 kb->kb_key.len = keysize;
154 kb->kb_key.data = buf;
159 * Should be used for buffers allocated with k/vmalloc().
161 * Dispose of @sgt with gss_teardown_sgtable().
163 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
164 * in cases where a single sg is sufficient. No attempt to reduce the
165 * number of sgs by squeezing physically contiguous pages together is
166 * made though, for simplicity.
168 * This function is copied from the ceph filesystem code.
/*
 * NOTE(review): several lines are omitted from this extract (the
 * vmalloc chunking branch, loop-bookkeeping such as advancing buf/off
 * and decrementing buf_len, and the return paths) — the body below is
 * not complete.
 */
170 int gss_setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
171 const void *buf, unsigned int buf_len)
173 struct scatterlist *sg;
174 const bool is_vmalloc = is_vmalloc_addr(buf);
175 unsigned int off = offset_in_page(buf);
176 unsigned int chunk_cnt = 1;
/* Total span rounded up to whole pages, including the leading offset. */
177 unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
182 memset(sgt, 0, sizeof(*sgt));
/* vmalloc memory is not physically contiguous: one sg per page. */
187 chunk_cnt = chunk_len >> PAGE_SHIFT;
188 chunk_len = PAGE_SIZE;
192 rc = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
/* Single-chunk case: use the caller-supplied sg, no allocation. */
196 WARN_ON_ONCE(chunk_cnt != 1);
197 sg_init_table(prealloc_sg, 1);
198 sgt->sgl = prealloc_sg;
199 sgt->nents = sgt->orig_nents = 1;
202 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
204 unsigned int len = min(chunk_len - off, buf_len);
207 page = vmalloc_to_page(buf);
209 page = virt_to_page(buf);
211 sg_set_page(sg, page, len, off);
/* All bytes must have been consumed by the loop above. */
218 WARN_ON_ONCE(buf_len != 0);
/*
 * Undo gss_setup_sgtable(): the table owns heap sg entries only when
 * more than one was needed; the single-entry case used the caller's
 * preallocated sg, so there is nothing to free then.
 *
 * NOTE(review): the sg_free_table() call inside the branch is omitted
 * from this extract.
 */
223 void gss_teardown_sgtable(struct sg_table *sgt)
225 if (sgt->orig_nents > 1)
/*
 * In-place encrypt or decrypt @length bytes (@in -> @out, may overlap:
 * memmove is used) with the given sync skcipher.  @length must be a
 * multiple of the cipher block size; @iv, if supplied, must fit in the
 * 16-byte local IV buffer.  Returns 0 on success, negative on error.
 *
 * NOTE(review): this extract omits declarations (ret), the NULL-iv
 * branch around the memcpy, error-path returns, and the final return —
 * confirm against the full source.
 */
229 int gss_crypt_generic(struct crypto_sync_skcipher *tfm, int decrypt,
230 const void *iv, const void *in, void *out, size_t length)
232 struct scatterlist sg;
233 struct sg_table sg_out;
/* Zeroed IV is used when the caller passes no IV. */
234 __u8 local_iv[16] = {0};
236 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
/* Block ciphers require whole-block input. */
240 if (length % crypto_sync_skcipher_blocksize(tfm) != 0) {
241 CERROR("output length %zu mismatch blocksize %d\n",
242 length, crypto_sync_skcipher_blocksize(tfm));
246 if (crypto_sync_skcipher_ivsize(tfm) > ARRAY_SIZE(local_iv)) {
247 CERROR("iv size too large %d\n",
248 crypto_sync_skcipher_ivsize(tfm));
253 memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
/* Work in @out so the crypto op is a single in-place pass. */
256 memmove(out, in, length);
258 ret = gss_setup_sgtable(&sg_out, &sg, out, length);
262 skcipher_request_set_sync_tfm(req, tfm);
263 skcipher_request_set_callback(req, 0, NULL, NULL);
264 skcipher_request_set_crypt(req, &sg, &sg, length, local_iv);
267 ret = crypto_skcipher_decrypt_iv(req, &sg, &sg, length);
269 ret = crypto_skcipher_encrypt_iv(req, &sg, &sg, length);
/* Scrub key material from the on-stack request before returning. */
271 skcipher_request_zero(req);
272 gss_teardown_sgtable(&sg_out);
/*
 * Feed a message set into an ongoing ahash: each non-empty rawobj in
 * @msgs, then each non-empty kiov page in @iovs, and finally the
 * header @hdr.  Empty entries are skipped.  Returns 0 or the first
 * crypto_ahash_update() error.
 *
 * NOTE(review): loop braces, declarations (sgt, i, rc), the NULL-hdr
 * guard, error-bailout lines and the final return are omitted from
 * this extract.
 */
277 int gss_digest_hash(struct ahash_request *req,
278 rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
279 int iovcnt, lnet_kiov_t *iovs)
281 struct scatterlist sg[1];
/* Pass 1: virtually-addressed message buffers need an sg table. */
286 for (i = 0; i < msgcnt; i++) {
287 if (msgs[i].len == 0)
290 rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
294 ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
295 rc = crypto_ahash_update(req);
296 gss_teardown_sgtable(&sgt);
/* Pass 2: kiov entries are already page-based — set the sg directly. */
301 for (i = 0; i < iovcnt; i++) {
302 if (iovs[i].kiov_len == 0)
305 sg_init_table(sg, 1);
306 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
307 iovs[i].kiov_offset);
309 ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
310 rc = crypto_ahash_update(req);
/* Finally mix in the full header buffer. */
316 rc = gss_setup_sgtable(&sgt, sg, hdr->data, hdr->len);
320 ahash_request_set_crypt(req, sg, NULL, hdr->len);
321 rc = crypto_ahash_update(req);
322 gss_teardown_sgtable(&sgt);
/*
 * Compatibility variant of gss_digest_hash(): identical message and
 * kiov handling, but for the header it hashes only the length field
 * (&hdr->len, sizeof(hdr->len)) rather than the header payload —
 * presumably to match an older on-wire MIC computation; confirm
 * against the peer implementation.
 *
 * NOTE(review): as with gss_digest_hash(), declarations, braces,
 * error-bailout lines and the final return are omitted here.
 */
330 int gss_digest_hash_compat(struct ahash_request *req,
331 rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
332 int iovcnt, lnet_kiov_t *iovs)
334 struct scatterlist sg[1];
/* Pass 1: rawobj message buffers. */
339 for (i = 0; i < msgcnt; i++) {
340 if (msgs[i].len == 0)
343 rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
347 ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
348 rc = crypto_ahash_update(req);
349 gss_teardown_sgtable(&sgt);
/* Pass 2: page-based kiov entries. */
354 for (i = 0; i < iovcnt; i++) {
355 if (iovs[i].kiov_len == 0)
358 sg_init_table(sg, 1);
359 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
360 iovs[i].kiov_offset);
362 ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
363 rc = crypto_ahash_update(req);
/* Compat difference: hash only the header length word, not hdr->data. */
369 rc = gss_setup_sgtable(&sgt, sg, &(hdr->len), sizeof(hdr->len));
373 ahash_request_set_crypt(req, sg, NULL, sizeof(hdr->len));
374 rc = crypto_ahash_update(req);
375 gss_teardown_sgtable(&sgt);
/*
 * Pad @msg up to a multiple of @blocksize (a power of two — the mask
 * arithmetic relies on that).  Each pad byte is set to the pad length,
 * PKCS#7-style.  Fails if the buffer (@msg_buflen) cannot hold the
 * padded message.
 *
 * NOTE(review): the mask term completing the padding expression, the
 * msg->len update and the returns are omitted from this extract.
 */
383 int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
387 padding = (blocksize - (msg->len & (blocksize - 1))) &
392 if (msg->len + padding > msg_buflen) {
393 CERROR("bufsize %u too small: datalen %u, padding %u\n",
394 msg_buflen, msg->len, padding);
/* Pad value == pad count, written after the existing payload. */
398 memset(msg->data + msg->len, padding, padding);
/*
 * Encrypt or decrypt a list of rawobjs into one contiguous output
 * buffer.  Each input object is mapped into its own src/dst sg table,
 * pushed through the skcipher, and the running output cursor (@buf)
 * and total (@datalen) advance by the object length.  On completion
 * outobj->len is trimmed to the bytes actually produced.
 *
 * NOTE(review): this extract omits declarations (buf, datalen, rc, i),
 * the direction flag parameter name, error-path returns, and the
 * conditional selecting the encrypt vs decrypt call — confirm the
 * exact IV-handling branches (lines 437/439) against the full source.
 */
403 int gss_crypt_rawobjs(struct crypto_sync_skcipher *tfm, __u8 *iv,
404 int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
407 struct scatterlist src;
408 struct scatterlist dst;
409 struct sg_table sg_dst;
410 struct sg_table sg_src;
414 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
419 skcipher_request_set_sync_tfm(req, tfm);
420 skcipher_request_set_callback(req, 0, NULL, NULL);
422 for (i = 0; i < inobj_cnt; i++) {
/* The output buffer must be able to hold every input object. */
423 LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
425 rc = gss_setup_sgtable(&sg_src, &src, inobjs[i].data,
430 rc = gss_setup_sgtable(&sg_dst, &dst, buf,
431 outobj->len - datalen);
/* dst setup failed: release the already-built src table. */
433 gss_teardown_sgtable(&sg_src);
437 skcipher_request_set_crypt(req, &src, &dst, src.length, iv);
439 skcipher_request_set_crypt_iv(req);
442 rc = crypto_skcipher_encrypt_iv(req, &dst, &src,
445 rc = crypto_skcipher_decrypt_iv(req, &dst, &src,
448 gss_teardown_sgtable(&sg_src);
449 gss_teardown_sgtable(&sg_dst);
452 CERROR("encrypt error %d\n", rc);
/* Scrub key material from the on-stack request before bailing. */
453 skcipher_request_zero(req);
457 datalen += inobjs[i].len;
458 buf += inobjs[i].len;
460 skcipher_request_zero(req);
/* Report how many bytes were actually written. */
462 outobj->len = datalen;