-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* Modifications for Lustre
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
*
* Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#else
#include <liblustre.h>
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
- kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
- if (kb->kb_tfm == NULL) {
- CERROR("failed to alloc tfm: %s, mode %d\n",
- alg_name, alg_mode);
- return -1;
- }
+ kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
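+        /* allocation failure is reported via ERR_PTR(), not NULL */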
+ if (IS_ERR(kb->kb_tfm)) {
+ CERROR("failed to alloc tfm: %s, mode %d\n",
+ alg_name, alg_mode);
+ return -1;
+ }
if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
if (q > end || q < p)
return -1;
- OBD_ALLOC(res->data, len);
+ OBD_ALLOC_LARGE(res->data, len);
if (!res->data)
return -1;
{
char *buf;
- OBD_ALLOC(buf, keysize);
+ OBD_ALLOC_LARGE(buf, keysize);
if (buf == NULL)
return -1;
if (get_bytes(ptr, end, buf, keysize)) {
- OBD_FREE(buf, keysize);
+ OBD_FREE_LARGE(buf, keysize);
return -1;
}
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
- sg->page = virt_to_page(ptr);
- sg->offset = offset_in_page(ptr);
- sg->length = len;
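+        /* sg_set_buf() does the virt_to_page()/offset_in_page() conversion */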
+ sg_set_buf(sg, ptr, len);
}
static
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
crypto_hmac_update(tfm, sg, 1);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
}
cksum->len = ll_crypto_hash_digestsize(tfm);
- OBD_ALLOC(cksum->data, cksum->len);
+ OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
goto out_tfm;
}
khdr->kh_filler = 0xff;
- spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- spin_unlock(&krb5_seq_lock);
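+        /* kc_seq_send is serialized by the global krb5_seq_lock */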
+ spin_lock(&krb5_seq_lock);
+ khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+ spin_unlock(&krb5_seq_lock);
}
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
RETURN(0);
}
+/*
+ * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
+ */
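+/*
+ * For example, assuming a 16-byte cipher blocksize: a 1000-byte kiov_len
+ * rounds up to 1008 bytes of cipher text ((1000 + 15) & ~15), and with
+ * adj_nob != 0 the extra 8 bytes are accounted for in desc->bd_nob.
+ */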
static
int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
struct krb5_header *khdr,
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
- src.page = desc->bd_iov[i].kiov_page;
- src.offset = desc->bd_iov[i].kiov_offset;
- src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
- (~(blocksize - 1));
-
- if (adj_nob)
- nob += src.length;
-
- dst.page = desc->bd_enc_iov[i].kiov_page;
- dst.offset = src.offset;
- dst.length = src.length;
+ sg_set_page(&src, desc->bd_iov[i].kiov_page,
+ (desc->bd_iov[i].kiov_len + blocksize - 1) &
+ (~(blocksize - 1)),
+ desc->bd_iov[i].kiov_offset);
+ if (adj_nob)
+ nob += src.length;
+ sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+ src.offset);
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
/*
* desc->bd_nob_transferred is the size of cipher text received.
* desc->bd_nob is the target size of plain text supposed to be.
+ *
+ * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * plain text size.
+ * - for client read: we don't know the data size of each page, so
+ * bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
+ * might be smaller, so we need to adjust it according to
+ * bd_enc_iov[]->kiov_len. this means we DO NOT support the case where the
+ * server sends odd-sized data in a page which is not the last one.
+ * - for server write: we know exactly the data size expected for each page,
+ * thus kiov_len is already accurate and we should not adjust it at all.
+ * also, bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len),
+ * which should have been done by prep_bulk().
*/
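+/*
+ * For example (hypothetical numbers), with 4096-byte pages, a 16-byte
+ * blocksize and a 5998-byte client read: page 0 carries 4096 bytes and
+ * page 1 carries round_up(1902, 16) = 1904 bytes of cipher text, so with
+ * adj_nob set, bd_iov[1].kiov_len is first set to 1904 and then clipped
+ * to bd_nob - pt_nob = 1902.
+ */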
static
int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
struct krb5_header *khdr,
struct ptlrpc_bulk_desc *desc,
rawobj_t *cipher,
- rawobj_t *plain)
+ rawobj_t *plain,
+ int adj_nob)
{
struct blkcipher_desc ciph_desc;
__u8 local_iv[16] = {0};
return rc;
}
- /*
- * decrypt clear pages. note the enc_iov is prepared by prep_bulk()
- * which already done some sanity checkings.
- *
- * desc->bd_nob is the actual plain text size supposed to be
- * transferred. desc->bd_nob_transferred is the actual cipher
- * text received.
- */
for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
i++) {
- if (desc->bd_enc_iov[i].kiov_len == 0)
- continue;
-
- if (ct_nob + desc->bd_enc_iov[i].kiov_len >
- desc->bd_nob_transferred)
- desc->bd_enc_iov[i].kiov_len =
- desc->bd_nob_transferred - ct_nob;
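+                /* prep_bulk() should have made the enc pages block aligned */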
+ if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
+ desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
+ CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+ i, desc->bd_enc_iov[i].kiov_offset,
+ desc->bd_enc_iov[i].kiov_len, blocksize);
+ return -EFAULT;
+ }
- desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
- if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
- desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
+ if (adj_nob) {
+ if (ct_nob + desc->bd_enc_iov[i].kiov_len >
+ desc->bd_nob_transferred)
+ desc->bd_enc_iov[i].kiov_len =
+ desc->bd_nob_transferred - ct_nob;
- src.page = desc->bd_enc_iov[i].kiov_page;
- src.offset = desc->bd_enc_iov[i].kiov_offset;
- src.length = desc->bd_enc_iov[i].kiov_len;
+ desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
+                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >
+                            desc->bd_nob)
+                                desc->bd_iov[i].kiov_len =
+                                        desc->bd_nob - pt_nob;
+ } else {
+ /* this should be guaranteed by LNET */
+ LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
+ desc->bd_nob_transferred);
+ LASSERT(desc->bd_iov[i].kiov_len <=
+ desc->bd_enc_iov[i].kiov_len);
+ }
- dst = src;
+ if (desc->bd_enc_iov[i].kiov_len == 0)
+ continue;
- if (desc->bd_iov[i].kiov_offset % blocksize == 0)
- dst.page = desc->bd_iov[i].kiov_page;
+ sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+ desc->bd_enc_iov[i].kiov_len,
+ desc->bd_enc_iov[i].kiov_offset);
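+                /* decrypt in place by default; when the plain text length is
+                 * block aligned we can decrypt straight into the destination
+                 * page, otherwise the partial tail is memcpy'd below */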
+ dst = src;
+ if (desc->bd_iov[i].kiov_len % blocksize == 0)
+ sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
return rc;
}
- if (desc->bd_iov[i].kiov_offset % blocksize) {
+ if (desc->bd_iov[i].kiov_len % blocksize != 0) {
memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
desc->bd_iov[i].kiov_offset,
cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
pt_nob += desc->bd_iov[i].kiov_len;
}
+ if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+ CERROR("%d cipher text transferred but only %d decrypted\n",
+ desc->bd_nob_transferred, ct_nob);
+ return -EFAULT;
+ }
+
+ if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+ CERROR("%d plain text expected but only %d received\n",
+ desc->bd_nob, pt_nob);
+ return -EFAULT;
+ }
+
+        /* if needed, clear the remaining unused iovs */
+ if (adj_nob)
+ while (i < desc->bd_iov_count)
+ desc->bd_iov[i++].kiov_len = 0;
+
/* decrypt tail (krb5 header) */
buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
- get_random_bytes(conf, ke->ke_conf_size);
+ cfs_get_random_bytes(conf, ke->ke_conf_size);
        /* get encryption blocksize. note kc_keye might not be associated with
         * a tfm; currently only arcfour-hmac lacks one */
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
- if (arc4_tfm == NULL) {
- CERROR("failed to alloc tfm arc4 in ECB mode\n");
- GOTO(arc4_out_key, rc = -EACCES);
- }
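+        /* arc4 is a stream cipher; it is requested through the blkcipher
+         * API as "ecb(arc4)" */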
+ arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ if (IS_ERR(arc4_tfm)) {
+ CERROR("failed to alloc tfm arc4 in ECB mode\n");
+ GOTO(arc4_out_key, rc = -EACCES);
+ }
if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
arc4_keye.len)) {
fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
- get_random_bytes(conf, ke->ke_conf_size);
+ cfs_get_random_bytes(conf, ke->ke_conf_size);
        /* get encryption blocksize. note kc_keye might not be associated with
         * a tfm; currently only arcfour-hmac lacks one */
}
/* decrypting */
- OBD_ALLOC(tmpbuf, bodysize);
+ OBD_ALLOC_LARGE(tmpbuf, bodysize);
if (!tmpbuf)
return GSS_S_FAILURE;
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
- if (arc4_tfm == NULL) {
- CERROR("failed to alloc tfm arc4 in ECB mode\n");
- GOTO(arc4_out_key, rc = -EACCES);
- }
+ arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ if (IS_ERR(arc4_tfm)) {
+ CERROR("failed to alloc tfm arc4 in ECB mode\n");
+ GOTO(arc4_out_key, rc = -EACCES);
+ }
if (ll_crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
major = GSS_S_COMPLETE;
out_free:
- OBD_FREE(tmpbuf, bodysize);
+ OBD_FREE_LARGE(tmpbuf, bodysize);
rawobj_free(&cksum);
return major;
}
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
struct ptlrpc_bulk_desc *desc,
- rawobj_t *token)
+ rawobj_t *token, int adj_nob)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
plain.len = cipher.len;
rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
- desc, &cipher, &plain);
+ desc, &cipher, &plain, adj_nob);
if (rc)
return GSS_S_DEFECTIVE_TOKEN;
int __init init_kerberos_module(void)
{
- int status;
+ int status;
- spin_lock_init(&krb5_seq_lock);
+ spin_lock_init(&krb5_seq_lock);
- status = lgss_mech_register(&gss_kerberos_mech);
- if (status)
- CERROR("Failed to register kerberos gss mechanism!\n");
- return status;
+ status = lgss_mech_register(&gss_kerberos_mech);
+ if (status)
+ CERROR("Failed to register kerberos gss mechanism!\n");
+ return status;
}
void __exit cleanup_kerberos_module(void)