LU-2221 ptlrpc: kerberos support for kernel>=2.6.24
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 5bd9f08..24c2953 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -1,9 +1,9 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * Modifications for Lustre
  *
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  *
  * Author: Eric Mei <ericm@clusterfs.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
 #ifdef __KERNEL__
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crypto.h>
-#include <linux/random.h>
 #include <linux/mutex.h>
 #else
 #include <liblustre.h>
@@ -155,12 +151,12 @@ static const char * enctype2str(__u32 enctype)
 static
 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 {
-        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
-        if (kb->kb_tfm == NULL) {
-                CERROR("failed to alloc tfm: %s, mode %d\n",
-                       alg_name, alg_mode);
-                return -1;
-        }
+       kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+       if (IS_ERR(kb->kb_tfm)) {
+               CERROR("failed to alloc tfm: %s, mode %d\n",
+                      alg_name, alg_mode);
+               return -1;
+       }
 
         if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                 CERROR("failed to set %s key, len %d\n",
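On the kernels this patch targets (>= 2.6.24), the crypto API reports blkcipher allocation failure with an ERR_PTR()-encoded pointer, never NULL, so the old NULL test could not fire and a failed allocation would only crash later at setkey time. The same fix is applied to the ecb(arc4) allocations further down. A minimal sketch of the convention, with illustrative names (demo_alloc_tfm is not the Lustre wrapper):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Illustrative only: crypto_alloc_blkcipher() returns an
 * ERR_PTR()-encoded pointer on failure, never NULL, so the
 * result must be tested with IS_ERR(). */
static struct crypto_blkcipher *demo_alloc_tfm(const char *alg_name)
{
        struct crypto_blkcipher *tfm;

        tfm = crypto_alloc_blkcipher(alg_name, 0, 0);
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alloc %s failed: %ld\n",
                       alg_name, PTR_ERR(tfm));
                return NULL;            /* callers then test for NULL */
        }
        return tfm;
}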
@@ -241,7 +237,7 @@ int get_rawobj(char **ptr, const char *end, rawobj_t *res)
         if (q > end || q < p)
                 return -1;
 
-        OBD_ALLOC(res->data, len);
+        OBD_ALLOC_LARGE(res->data, len);
         if (!res->data)
                 return -1;
 
@@ -257,12 +253,12 @@ int get_keyblock(char **ptr, const char *end,
 {
         char *buf;
 
-        OBD_ALLOC(buf, keysize);
+        OBD_ALLOC_LARGE(buf, keysize);
         if (buf == NULL)
                 return -1;
 
         if (get_bytes(ptr, end, buf, keysize)) {
-                OBD_FREE(buf, keysize);
+                OBD_FREE_LARGE(buf, keysize);
                 return -1;
         }
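The OBD_ALLOC_LARGE/OBD_FREE_LARGE conversions in this and the surrounding hunks matter because lengths such as keysize and bodysize are taken from the wire and may exceed what kmalloc() can reliably satisfy with physically contiguous memory. A sketch of the underlying idea, assuming an illustrative threshold (the real macros are defined in Lustre's obd_support.h):

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Illustrative cutoff: past a few pages, fall back to vmalloc()
 * rather than failing under memory fragmentation.  The allocator
 * must be matched at free time, hence the paired helper. */
#define DEMO_LARGE_THRESH       (4 * PAGE_SIZE)

static void *demo_alloc_large(size_t size)
{
        if (size > DEMO_LARGE_THRESH)
                return vmalloc(size);
        return kmalloc(size, GFP_NOFS);
}

static void demo_free_large(void *ptr, size_t size)
{
        if (size > DEMO_LARGE_THRESH)
                vfree(ptr);
        else
                kfree(ptr);
}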
 
@@ -533,9 +529,7 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
 static
 void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 {
-        sg->page = virt_to_page(ptr);
-        sg->offset = offset_in_page(ptr);
-        sg->length = len;
+       sg_set_buf(sg, ptr, len);
 }
 
 static
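This hunk is the heart of the 2.6.24 incompatibility: the scatterlist rework replaced the open .page field with a page_link word whose low bits carry chain and end markers, so direct field assignment no longer compiles, and poking the fields by hand would corrupt the markers anyway. sg_set_buf() derives page and offset from a virtual address; a sketch of equivalent usage (the patch leaves initialisation to the callers, while the sketch initialises a one-entry table explicitly):

#include <linux/scatterlist.h>

/* Equivalent of the new buf_to_sg() for a single-entry list:
 * sg_init_table() zeroes the entry and sets the end marker,
 * sg_set_buf() fills in page, offset and length from ptr. */
static void demo_buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, ptr, len);
}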
@@ -612,9 +606,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
@@ -651,9 +645,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
                 crypto_hmac_update(tfm, sg, 1);
         }
 
@@ -696,9 +690,9 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
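The same conversion is applied in all three digest loops above. One detail worth flagging: sg_set_page() takes length before offset, the reverse of the old .offset/.length assignment order, which makes transposed arguments the classic bug in mechanical conversions like this one. For reference, a sketch with illustrative names:

#include <linux/scatterlist.h>

/* void sg_set_page(struct scatterlist *sg, struct page *page,
 *                  unsigned int len, unsigned int offset);
 * note that len precedes offset. */
static void demo_kiov_to_sg(struct scatterlist *sg, struct page *page,
                            unsigned int kiov_len, unsigned int kiov_offset)
{
        sg_init_table(sg, 1);
        sg_set_page(sg, page, kiov_len, kiov_offset);
}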
 
@@ -736,7 +730,7 @@ __s32 krb5_make_checksum(__u32 enctype,
         }
 
         cksum->len = ll_crypto_hash_digestsize(tfm);
-        OBD_ALLOC(cksum->data, cksum->len);
+        OBD_ALLOC_LARGE(cksum->data, cksum->len);
         if (!cksum->data) {
                 cksum->len = 0;
                 goto out_tfm;
@@ -777,9 +771,9 @@ static void fill_krb5_header(struct krb5_ctx *kctx,
         }
 
         khdr->kh_filler = 0xff;
-        spin_lock(&krb5_seq_lock);
-        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        spin_unlock(&krb5_seq_lock);
+       spin_lock(&krb5_seq_lock);
+       khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+       spin_unlock(&krb5_seq_lock);
 }
 
 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
@@ -1023,17 +1017,14 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
-                src.page = desc->bd_iov[i].kiov_page;
-                src.offset = desc->bd_iov[i].kiov_offset;
-                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
-                             (~(blocksize - 1));
-
-                if (adj_nob)
-                        nob += src.length;
-
-                dst.page = desc->bd_enc_iov[i].kiov_page;
-                dst.offset = src.offset;
-                dst.length = src.length;
+               sg_set_page(&src, desc->bd_iov[i].kiov_page,
+                           (desc->bd_iov[i].kiov_len + blocksize - 1) &
+                           (~(blocksize - 1)),
+                           desc->bd_iov[i].kiov_offset);
+               if (adj_nob)
+                       nob += src.length;
+               sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+                           src.offset);
 
                 desc->bd_enc_iov[i].kiov_offset = dst.offset;
                 desc->bd_enc_iov[i].kiov_len = dst.length;
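The masking expression folded into the sg_set_page() call rounds each fragment up to the cipher block size and requires blocksize to be a power of two: with blocksize = 16 and kiov_len = 20, (20 + 15) & ~15 = 35 & ~15 = 32. As a standalone helper for clarity (name illustrative):

/* Round len up to the next multiple of blocksize; only valid
 * when blocksize is a power of two. */
static inline unsigned int demo_blk_round_up(unsigned int len,
                                             unsigned int blocksize)
{
        return (len + blocksize - 1) & ~(blocksize - 1);
}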
@@ -1150,13 +1141,12 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 if (desc->bd_enc_iov[i].kiov_len == 0)
                         continue;
 
-                src.page = desc->bd_enc_iov[i].kiov_page;
-                src.offset = desc->bd_enc_iov[i].kiov_offset;
-                src.length = desc->bd_enc_iov[i].kiov_len;
-
-                dst = src;
-                if (desc->bd_iov[i].kiov_len % blocksize == 0)
-                        dst.page = desc->bd_iov[i].kiov_page;
+               sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+                           desc->bd_enc_iov[i].kiov_len,
+                           desc->bd_enc_iov[i].kiov_offset);
+               dst = src;
+               if (desc->bd_iov[i].kiov_len % blocksize == 0)
+                       sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
 
                 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
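sg_assign_page() is the new-API replacement for the old "dst = src; dst.page = ..." idiom: it swaps in a new backing page while preserving the entry's offset, length and the marker bits hidden in page_link. A sketch of the in-place versus out-of-place choice made above (names illustrative):

#include <linux/scatterlist.h>

/* Decrypt in place by default; redirect the output to the clear
 * page only when the plaintext length is block-aligned, mirroring
 * the test in krb5_decrypt_bulk(). */
static void demo_pick_dst(struct scatterlist *dst,
                          const struct scatterlist *src,
                          struct page *clear_page, int aligned)
{
        *dst = *src;                    /* copies offset and length */
        if (aligned)
                sg_assign_page(dst, clear_page);
}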
@@ -1248,7 +1238,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
-        get_random_bytes(conf, ke->ke_conf_size);
+        cfs_get_random_bytes(conf, ke->ke_conf_size);
 
         /* get encryption blocksize. note kc_keye might not associated with
          * a tfm, currently only for arcfour-hmac */
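get_random_bytes() is kernel-only, and this file also builds against liblustre (see the #ifdef __KERNEL__ block at the top of the diff), so confounder generation moves behind a libcfs wrapper here and in gss_wrap_bulk_kerberos() below. A hedged sketch of what such a portability shim can look like; this is not libcfs's actual implementation, and any real userspace branch must draw from a cryptographically strong source:

#ifdef __KERNEL__
# include <linux/random.h>
static inline void demo_get_random_bytes(void *buf, int size)
{
        get_random_bytes(buf, size);
}
#else
# include <fcntl.h>
# include <unistd.h>
static inline void demo_get_random_bytes(void *buf, int size)
{
        int fd = open("/dev/urandom", O_RDONLY);

        if (fd >= 0) {
                read(fd, buf, size);    /* illustrative: check the return */
                close(fd);
        }
}
#endif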
@@ -1312,11 +1302,11 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                         GOTO(arc4_out, rc = -EACCES);
                 }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, rc = -EACCES);
-                }
+               arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               if (IS_ERR(arc4_tfm)) {
+                       CERROR("failed to alloc tfm arc4 in ECB mode\n");
+                       GOTO(arc4_out_key, rc = -EACCES);
+               }
 
                 if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
@@ -1418,7 +1408,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
-        get_random_bytes(conf, ke->ke_conf_size);
+        cfs_get_random_bytes(conf, ke->ke_conf_size);
 
         /* get encryption blocksize. note kc_keye might not associated with
          * a tfm, currently only for arcfour-hmac */
@@ -1564,7 +1554,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         }
 
         /* decrypting */
-        OBD_ALLOC(tmpbuf, bodysize);
+        OBD_ALLOC_LARGE(tmpbuf, bodysize);
         if (!tmpbuf)
                 return GSS_S_FAILURE;
 
@@ -1588,11 +1578,11 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                         GOTO(arc4_out, rc = -EACCES);
                 }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, rc = -EACCES);
-                }
+               arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               if (IS_ERR(arc4_tfm)) {
+                       CERROR("failed to alloc tfm arc4 in ECB mode\n");
+                       GOTO(arc4_out_key, rc = -EACCES);
+               }
 
                 if (ll_crypto_blkcipher_setkey(arc4_tfm,
                                          arc4_keye.data, arc4_keye.len)) {
@@ -1661,7 +1651,7 @@ arc4_out:
 
         major = GSS_S_COMPLETE;
 out_free:
-        OBD_FREE(tmpbuf, bodysize);
+        OBD_FREE_LARGE(tmpbuf, bodysize);
         rawobj_free(&cksum);
         return major;
 }
@@ -1826,14 +1816,14 @@ static struct gss_api_mech gss_kerberos_mech = {
 
 int __init init_kerberos_module(void)
 {
-        int status;
+       int status;
 
-        spin_lock_init(&krb5_seq_lock);
+       spin_lock_init(&krb5_seq_lock);
 
-        status = lgss_mech_register(&gss_kerberos_mech);
-        if (status)
-                CERROR("Failed to register kerberos gss mechanism!\n");
-        return status;
+       status = lgss_mech_register(&gss_kerberos_mech);
+       if (status)
+               CERROR("Failed to register kerberos gss mechanism!\n");
+       return status;
 }
 
 void __exit cleanup_kerberos_module(void)