 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include "gss_asn1.h"
#include "gss_krb5.h"
-static cfs_spinlock_t krb5_seq_lock;
+static spinlock_t krb5_seq_lock;
struct krb5_enctype {
char *ke_dispname;
/**
 * Map a virtually-contiguous kernel buffer into a single scatterlist
 * entry via the standard sg_set_buf() helper (which derives the page
 * and in-page offset from \a ptr).
 *
 * \param sg	scatterlist entry to fill
 * \param ptr	kernel virtual address of the buffer
 * \param len	buffer length in bytes
 */
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
	sg_set_buf(sg, ptr, len);
}
static
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
crypto_hmac_update(tfm, sg, 1);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
}
khdr->kh_filler = 0xff;
- cfs_spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- cfs_spin_unlock(&krb5_seq_lock);
+ spin_lock(&krb5_seq_lock);
+ khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+ spin_unlock(&krb5_seq_lock);
}
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
- src.page = desc->bd_iov[i].kiov_page;
- src.offset = desc->bd_iov[i].kiov_offset;
- src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
- (~(blocksize - 1));
-
- if (adj_nob)
- nob += src.length;
-
- dst.page = desc->bd_enc_iov[i].kiov_page;
- dst.offset = src.offset;
- dst.length = src.length;
+ sg_set_page(&src, desc->bd_iov[i].kiov_page,
+ (desc->bd_iov[i].kiov_len + blocksize - 1) &
+ (~(blocksize - 1)),
+ desc->bd_iov[i].kiov_offset);
+ if (adj_nob)
+ nob += src.length;
+ sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+ src.offset);
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
if (desc->bd_enc_iov[i].kiov_len == 0)
continue;
- src.page = desc->bd_enc_iov[i].kiov_page;
- src.offset = desc->bd_enc_iov[i].kiov_offset;
- src.length = desc->bd_enc_iov[i].kiov_len;
-
- dst = src;
- if (desc->bd_iov[i].kiov_len % blocksize == 0)
- dst.page = desc->bd_iov[i].kiov_page;
+ sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+ desc->bd_enc_iov[i].kiov_len,
+ desc->bd_enc_iov[i].kiov_offset);
+ dst = src;
+ if (desc->bd_iov[i].kiov_len % blocksize == 0)
+ sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
/**
 * Module entry point: initialize the sequence-number lock and register
 * the kerberos GSS mechanism with the lgss framework.
 *
 * \retval 0		on success
 * \retval non-zero	error returned by lgss_mech_register()
 */
int __init init_kerberos_module(void)
{
	int status;

	spin_lock_init(&krb5_seq_lock);

	status = lgss_mech_register(&gss_kerberos_mech);
	if (status)
		CERROR("Failed to register kerberos gss mechanism!\n");
	return status;
}
void __exit cleanup_kerberos_module(void)