X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_krb5_mech.c;h=24c2953974497523214e2f29a72db69f0a828422;hb=5084bc64c685c6584afd055fab0347f52040cfc2;hp=4b7900fde7cf1e897e25f133ee217018024a1634;hpb=711e142d055fda62be482f74f2f73acac5e7e453;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 4b7900f..24c2953 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -1,10 +1,10 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * Modifications for Lustre
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
  * Author: Eric Mei
  */
 
@@ -48,9 +48,6 @@
  *
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
 #ifdef __KERNEL__
 #include 
@@ -76,7 +73,7 @@
 #include "gss_asn1.h"
 #include "gss_krb5.h"
 
-static cfs_spinlock_t krb5_seq_lock;
+static spinlock_t krb5_seq_lock;
 
 struct krb5_enctype {
         char *ke_dispname;
@@ -154,12 +151,12 @@ static const char * enctype2str(__u32 enctype)
 static
 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 {
-        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
-        if (kb->kb_tfm == NULL) {
-                CERROR("failed to alloc tfm: %s, mode %d\n",
-                       alg_name, alg_mode);
-                return -1;
-        }
+	kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+	if (IS_ERR(kb->kb_tfm)) {
+		CERROR("failed to alloc tfm: %s, mode %d\n",
+		       alg_name, alg_mode);
+		return -1;
+	}
 
         if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                 CERROR("failed to set %s key, len %d\n",
@@ -240,7 +237,7 @@ int get_rawobj(char **ptr, const char *end, rawobj_t *res)
         if (q > end || q < p)
                 return -1;
 
-        OBD_ALLOC(res->data, len);
+        OBD_ALLOC_LARGE(res->data, len);
         if (!res->data)
                 return -1;
 
@@ -256,12 +253,12 @@ int get_keyblock(char **ptr, const char *end,
 {
         char *buf;
 
-        OBD_ALLOC(buf, keysize);
+        OBD_ALLOC_LARGE(buf, keysize);
         if (buf == NULL)
                 return -1;
 
         if (get_bytes(ptr, end, buf, keysize)) {
-                OBD_FREE(buf, keysize);
+                OBD_FREE_LARGE(buf, keysize);
                 return -1;
         }
 
@@ -532,9 +529,7 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
 static
 void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 {
-        sg->page = virt_to_page(ptr);
-        sg->offset = offset_in_page(ptr);
-        sg->length = len;
+	sg_set_buf(sg, ptr, len);
 }
 
 static
@@ -611,9 +606,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+			    iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
@@ -650,9 +645,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+			    iovs[i].kiov_offset);
                 crypto_hmac_update(tfm, sg, 1);
         }
 
@@ -695,9 +690,9 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+			    iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
@@ -735,7 +730,7 @@ __s32 krb5_make_checksum(__u32 enctype,
         }
 
         cksum->len = ll_crypto_hash_digestsize(tfm);
-        OBD_ALLOC(cksum->data, cksum->len);
+        OBD_ALLOC_LARGE(cksum->data, cksum->len);
         if (!cksum->data) {
                 cksum->len = 0;
                 goto out_tfm;
@@ -776,9 +771,9 @@ static void fill_krb5_header(struct krb5_ctx *kctx,
         }
 
         khdr->kh_filler = 0xff;
-        cfs_spin_lock(&krb5_seq_lock);
-        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        cfs_spin_unlock(&krb5_seq_lock);
+	spin_lock(&krb5_seq_lock);
+	khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+	spin_unlock(&krb5_seq_lock);
 }
 
 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
@@ -1022,17 +1017,14 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
-                src.page = desc->bd_iov[i].kiov_page;
-                src.offset = desc->bd_iov[i].kiov_offset;
-                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
-                             (~(blocksize - 1));
-
-                if (adj_nob)
-                        nob += src.length;
-
-                dst.page = desc->bd_enc_iov[i].kiov_page;
-                dst.offset = src.offset;
-                dst.length = src.length;
+		sg_set_page(&src, desc->bd_iov[i].kiov_page,
+			    (desc->bd_iov[i].kiov_len + blocksize - 1) &
+			    (~(blocksize - 1)),
+			    desc->bd_iov[i].kiov_offset);
+		if (adj_nob)
+			nob += src.length;
+		sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+			    src.offset);
 
                 desc->bd_enc_iov[i].kiov_offset = dst.offset;
                 desc->bd_enc_iov[i].kiov_len = dst.length;
@@ -1149,13 +1141,12 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 if (desc->bd_enc_iov[i].kiov_len == 0)
                         continue;
 
-                src.page = desc->bd_enc_iov[i].kiov_page;
-                src.offset = desc->bd_enc_iov[i].kiov_offset;
-                src.length = desc->bd_enc_iov[i].kiov_len;
-
-                dst = src;
-                if (desc->bd_iov[i].kiov_len % blocksize == 0)
-                        dst.page = desc->bd_iov[i].kiov_page;
+		sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+			    desc->bd_enc_iov[i].kiov_len,
+			    desc->bd_enc_iov[i].kiov_offset);
+		dst = src;
+		if (desc->bd_iov[i].kiov_len % blocksize == 0)
+			sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
 
                 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
@@ -1311,11 +1302,11 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                         GOTO(arc4_out, rc = -EACCES);
                 }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, rc = -EACCES);
-                }
+		arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+		if (IS_ERR(arc4_tfm)) {
+			CERROR("failed to alloc tfm arc4 in ECB mode\n");
+			GOTO(arc4_out_key, rc = -EACCES);
+		}
 
                 if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
@@ -1563,7 +1554,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
         }
 
         /* decrypting */
-        OBD_ALLOC(tmpbuf, bodysize);
+        OBD_ALLOC_LARGE(tmpbuf, bodysize);
         if (!tmpbuf)
                 return GSS_S_FAILURE;
 
@@ -1587,11 +1578,11 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
                         GOTO(arc4_out, rc = -EACCES);
                 }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, rc = -EACCES);
-                }
+		arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+		if (IS_ERR(arc4_tfm)) {
+			CERROR("failed to alloc tfm arc4 in ECB mode\n");
+			GOTO(arc4_out_key, rc = -EACCES);
+		}
 
                 if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
@@ -1660,7 +1651,7 @@ arc4_out:
 
         major = GSS_S_COMPLETE;
 out_free:
-        OBD_FREE(tmpbuf, bodysize);
+        OBD_FREE_LARGE(tmpbuf, bodysize);
         rawobj_free(&cksum);
         return major;
 }
@@ -1825,14 +1816,14 @@ static struct gss_api_mech gss_kerberos_mech = {
 
 int __init init_kerberos_module(void)
 {
-        int status;
+	int status;
 
-        cfs_spin_lock_init(&krb5_seq_lock);
+	spin_lock_init(&krb5_seq_lock);
 
-        status = lgss_mech_register(&gss_kerberos_mech);
-        if (status)
-                CERROR("Failed to register kerberos gss mechanism!\n");
-        return status;
+	status = lgss_mech_register(&gss_kerberos_mech);
+	if (status)
+		CERROR("Failed to register kerberos gss mechanism!\n");
+	return status;
 }
 
 void __exit cleanup_kerberos_module(void)
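
Note on the two patterns that recur throughout the diff: open-coded scatterlist field assignments (sg->page / sg->offset / sg->length) are replaced with the sg_set_buf()/sg_set_page() helpers, and the result of ll_crypto_alloc_blkcipher() is tested with IS_ERR() instead of a NULL comparison, since the kernel crypto API reports allocation failure via ERR_PTR(). The sketch below is illustrative only and is not part of the patch: it calls the legacy blkcipher API from <linux/crypto.h> directly rather than Lustre's ll_crypto_* compatibility wrappers, and the demo_* names are hypothetical.

/* Minimal sketch, not from the patch: shows sg_set_page() and the IS_ERR()
 * check on cipher allocation.  Assumes a kernel that still provides the
 * legacy blkcipher API. */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>

/* Map one bulk page into a scatterlist entry, rounding the length up to the
 * cipher block size the way krb5_encrypt_bulk() does. */
static void demo_map_page(struct scatterlist *sg, struct page *page,
			  unsigned int len, unsigned int offset,
			  unsigned int blocksize)
{
	unsigned int padded = (len + blocksize - 1) & ~(blocksize - 1);

	/* replaces: sg->page = page; sg->offset = offset;
	 *           sg->length = padded; */
	sg_set_page(sg, page, padded, offset);
}

/* Allocate a block cipher transform.  crypto_alloc_blkcipher() reports
 * failure with ERR_PTR(), so it must be checked with IS_ERR() rather than
 * compared against NULL. */
static struct crypto_blkcipher *demo_alloc_cipher(const char *name)
{
	struct crypto_blkcipher *tfm = crypto_alloc_blkcipher(name, 0, 0);

	if (IS_ERR(tfm)) {
		printk(KERN_ERR "failed to alloc tfm %s: %ld\n",
		       name, PTR_ERR(tfm));
		return NULL;
	}
	return tfm;
}

In the file itself these calls go through the ll_crypto_* compatibility macros, which resolve to the crypto_* functions shown above on kernels recent enough to provide them.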