LU-2221 ptlrpc: kerberos support for kernel>=2.6.24
author Fan Yong <yong.fan@whamcloud.com>
Sun, 6 Jan 2013 23:42:27 +0000 (07:42 +0800)
committer Oleg Drokin <green@whamcloud.com>
Thu, 31 Jan 2013 06:36:57 +0000 (01:36 -0500)
Since kernel 2.6.24 the scatterlist struct no longer has a "page"
field, so the related Lustre Kerberos code no longer works.

Do not access scatterlist::page directly; instead, use the
scatterlist helpers sg_set_page() and sg_assign_page().
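
For illustration, a minimal sketch of the old and new usage (sg,
page, offset and len are placeholder variables; sg_set_page() and
sg_assign_page() are the standard <linux/scatterlist.h> helpers):

    /* Before 2.6.24: fields were assigned directly. */
    sg->page   = page;
    sg->offset = offset;
    sg->length = len;

    /* Since 2.6.24: use the accessors, which also preserve the
     * chain/end markers encoded in scatterlist::page_link. */
    sg_set_page(sg, page, len, offset);  /* set page, length and offset */
    sg_assign_page(sg, page);            /* replace only the page */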

Signed-off-by: Fan Yong <fan.yong@intel.com>
Signed-off-by: Thomas Stibor <thomas@stibor.net>
Change-Id: I446925bb42c1e018a55a69948383c8f71976f1fa
Reviewed-on: http://review.whamcloud.com/4394
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
libcfs/autoconf/lustre-libcfs.m4
libcfs/include/libcfs/linux/kp30.h
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/sec_gss.c

diff --git a/libcfs/autoconf/lustre-libcfs.m4 b/libcfs/autoconf/lustre-libcfs.m4
index 04f61a6..e661f7d 100644
@@ -367,7 +367,7 @@ LB_LINUX_TRY_COMPILE([
 ],[
         AC_MSG_RESULT(yes)
         AC_DEFINE(HAVE_SCATTERLIST_SETPAGE, 1,
-                  [struct scatterlist has page member])
+                  [struct scatterlist has no page member])
 ],[
         AC_MSG_RESULT(NO)
 ])
diff --git a/libcfs/include/libcfs/linux/kp30.h b/libcfs/include/libcfs/linux/kp30.h
index 5ef7559..205fdfd 100644
@@ -337,6 +337,11 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
         sg->offset = offset;
         sg->length = len;
 }
+
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+       sg->page = page;
+}
 #endif
 
 #define cfs_smp_processor_id()  smp_processor_id()
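
Note: the kp30.h hunk above extends an existing compatibility block
for pre-2.6.24 kernels. A sketch of the complete block as it reads
after the patch (assuming the guard uses the HAVE_SCATTERLIST_SETPAGE
define from the m4 check above):

    #ifndef HAVE_SCATTERLIST_SETPAGE
    /* Old kernels lack these helpers; emulate them by writing the
     * fields that still exist there. */
    static inline void sg_set_page(struct scatterlist *sg, struct page *page,
                                   unsigned int len, unsigned int offset)
    {
            sg->page = page;
            sg->offset = offset;
            sg->length = len;
    }

    static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
    {
            sg->page = page;
    }
    #endif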
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 7ed8269..24c2953 100644
@@ -529,9 +529,7 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
 static
 void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 {
-        sg->page = virt_to_page(ptr);
-        sg->offset = offset_in_page(ptr);
-        sg->length = len;
+       sg_set_buf(sg, ptr, len);
 }
 
 static
@@ -608,9 +606,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
@@ -647,9 +645,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
                 crypto_hmac_update(tfm, sg, 1);
         }
 
@@ -692,9 +690,9 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
@@ -1019,17 +1017,14 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
-                src.page = desc->bd_iov[i].kiov_page;
-                src.offset = desc->bd_iov[i].kiov_offset;
-                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
-                             (~(blocksize - 1));
-
-                if (adj_nob)
-                        nob += src.length;
-
-                dst.page = desc->bd_enc_iov[i].kiov_page;
-                dst.offset = src.offset;
-                dst.length = src.length;
+               sg_set_page(&src, desc->bd_iov[i].kiov_page,
+                           (desc->bd_iov[i].kiov_len + blocksize - 1) &
+                           (~(blocksize - 1)),
+                           desc->bd_iov[i].kiov_offset);
+               if (adj_nob)
+                       nob += src.length;
+               sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+                           src.offset);
 
                 desc->bd_enc_iov[i].kiov_offset = dst.offset;
         desc->bd_enc_iov[i].kiov_len = dst.length;
@@ -1146,13 +1141,12 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 if (desc->bd_enc_iov[i].kiov_len == 0)
                         continue;
 
-                src.page = desc->bd_enc_iov[i].kiov_page;
-                src.offset = desc->bd_enc_iov[i].kiov_offset;
-                src.length = desc->bd_enc_iov[i].kiov_len;
-
-                dst = src;
-                if (desc->bd_iov[i].kiov_len % blocksize == 0)
-                        dst.page = desc->bd_iov[i].kiov_page;
+               sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+                           desc->bd_enc_iov[i].kiov_len,
+                           desc->bd_enc_iov[i].kiov_offset);
+               dst = src;
+               if (desc->bd_iov[i].kiov_len % blocksize == 0)
+                       sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
 
                 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c
index 9046335..c244b1b 100644
@@ -73,6 +73,7 @@
 #include "gss_api.h"
 
 #include <linux/crypto.h>
+#include <linux/crc32.h>
 
 /*
  * early reply have fixed size, respectively in privacy and integrity mode.