From 5084bc64c685c6584afd055fab0347f52040cfc2 Mon Sep 17 00:00:00 2001
From: Fan Yong
Date: Mon, 7 Jan 2013 07:42:27 +0800
Subject: [PATCH] LU-2221 ptlrpc: kerberos support for kernel>=2.6.24

Since kernel 2.6.24 the scatterlist struct has no "page" field, so the
related Lustre kerberos code no longer works. Do not access
scatterlist::page directly; instead, use the scatterlist functions
sg_set_page() and sg_assign_page().

Signed-off-by: Fan Yong
Signed-off-by: Thomas Stibor
Change-Id: I446925bb42c1e018a55a69948383c8f71976f1fa
Reviewed-on: http://review.whamcloud.com/4394
Tested-by: Hudson
Tested-by: Maloo
Reviewed-by: Andreas Dilger
Reviewed-by: Nathaniel Clark
---
 libcfs/autoconf/lustre-libcfs.m4   |  2 +-
 libcfs/include/libcfs/linux/kp30.h |  5 ++++
 lustre/ptlrpc/gss/gss_krb5_mech.c  | 54 +++++++++++++++++---------------------
 lustre/ptlrpc/gss/sec_gss.c        |  1 +
 4 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/libcfs/autoconf/lustre-libcfs.m4 b/libcfs/autoconf/lustre-libcfs.m4
index 04f61a6..e661f7d 100644
--- a/libcfs/autoconf/lustre-libcfs.m4
+++ b/libcfs/autoconf/lustre-libcfs.m4
@@ -367,7 +367,7 @@ LB_LINUX_TRY_COMPILE([
 ],[
         AC_MSG_RESULT(yes)
         AC_DEFINE(HAVE_SCATTERLIST_SETPAGE, 1,
-                  [struct scatterlist has page member])
+                  [struct scatterlist has no page member])
 ],[
         AC_MSG_RESULT(NO)
 ])
diff --git a/libcfs/include/libcfs/linux/kp30.h b/libcfs/include/libcfs/linux/kp30.h
index 5ef7559..205fdfd 100644
--- a/libcfs/include/libcfs/linux/kp30.h
+++ b/libcfs/include/libcfs/linux/kp30.h
@@ -337,6 +337,11 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
         sg->offset = offset;
         sg->length = len;
 }
+
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+        sg->page = page;
+}
 #endif
 
 #define cfs_smp_processor_id() smp_processor_id()
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 7ed8269..24c2953 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -529,9 +529,7 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
 static
 void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 {
-        sg->page = virt_to_page(ptr);
-        sg->offset = offset_in_page(ptr);
-        sg->length = len;
+        sg_set_buf(sg, ptr, len);
 }
 
 static
@@ -608,9 +606,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                            iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
@@ -647,9 +645,9 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                            iovs[i].kiov_offset);
                 crypto_hmac_update(tfm, sg, 1);
         }
 
@@ -692,9 +690,9 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
+
+                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                            iovs[i].kiov_offset);
                 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
@@ -1019,17 +1017,14 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
-                src.page = desc->bd_iov[i].kiov_page;
-                src.offset = desc->bd_iov[i].kiov_offset;
-                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
-                             (~(blocksize - 1));
-
-                if (adj_nob)
-                        nob += src.length;
-
-                dst.page = desc->bd_enc_iov[i].kiov_page;
-                dst.offset = src.offset;
-                dst.length = src.length;
+                sg_set_page(&src, desc->bd_iov[i].kiov_page,
+                            (desc->bd_iov[i].kiov_len + blocksize - 1) &
+                            (~(blocksize - 1)),
+                            desc->bd_iov[i].kiov_offset);
+                if (adj_nob)
+                        nob += src.length;
+                sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+                            src.offset);
 
                 desc->bd_enc_iov[i].kiov_offset = dst.offset;
                 desc->bd_enc_iov[i].kiov_len = dst.length;
@@ -1146,13 +1141,12 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 if (desc->bd_enc_iov[i].kiov_len == 0)
                         continue;
 
-                src.page = desc->bd_enc_iov[i].kiov_page;
-                src.offset = desc->bd_enc_iov[i].kiov_offset;
-                src.length = desc->bd_enc_iov[i].kiov_len;
-
-                dst = src;
-                if (desc->bd_iov[i].kiov_len % blocksize == 0)
-                        dst.page = desc->bd_iov[i].kiov_page;
+                sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+                            desc->bd_enc_iov[i].kiov_len,
+                            desc->bd_enc_iov[i].kiov_offset);
+                dst = src;
+                if (desc->bd_iov[i].kiov_len % blocksize == 0)
+                        sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
 
                 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c
index 9046335..c244b1b 100644
--- a/lustre/ptlrpc/gss/sec_gss.c
+++ b/lustre/ptlrpc/gss/sec_gss.c
@@ -73,6 +73,7 @@
 #include "gss_api.h"
 
 #include
+#include
 
 /*
  * early reply have fixed size, respectively in privacy and integrity mode.
-- 
1.8.3.1
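
For context, the compatibility pattern that the kp30.h and gss_krb5_mech.c
hunks rely on can be summarized by the sketch below. It is not part of the
patch, only an illustration assuming a Linux kernel build environment:
kernels >= 2.6.24 provide sg_set_page() and sg_assign_page() in
<linux/scatterlist.h>, while older kernels still expose the scatterlist
->page member, so equivalent inline shims can be supplied behind the
configure-time HAVE_SCATTERLIST_SETPAGE flag tested in lustre-libcfs.m4.

#include <linux/scatterlist.h>

#ifndef HAVE_SCATTERLIST_SETPAGE        /* pre-2.6.24 kernels only */
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
                               unsigned int len, unsigned int offset)
{
        sg->page   = page;              /* ->page only exists on old kernels */
        sg->offset = offset;
        sg->length = len;
}

static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
        sg->page = page;
}
#endif

/* Callers then compile unchanged on both sides of the 2.6.24 API change;
 * for example, the patched buf_to_sg() simply delegates to the stock
 * helper, which fills in page, offset and length from a kernel virtual
 * address: */
static void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
        sg_set_buf(sg, ptr, len);
}

This is also why the decrypt path above copies src into dst and then calls
sg_assign_page() rather than sg_set_page(): only the backing page is swapped,
while the offset and length copied from src are kept.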