Since kernel 2.6.24 the scatterlist struct has no "page" field,
so the related Lustre Kerberos code can no longer work.
Therefore, do not access scatterlist::page directly; instead, use
the scatterlist helper functions sg_set_page() and sg_assign_page().
Signed-off-by: Fan Yong <fan.yong@intel.com>
Signed-off-by: Thomas Stibor <thomas@stibor.net>
Change-Id: I446925bb42c1e018a55a69948383c8f71976f1fa
Reviewed-on: http://review.whamcloud.com/4394
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SCATTERLIST_SETPAGE, 1,
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SCATTERLIST_SETPAGE, 1,
- [struct scatterlist has page member])
+ [struct scatterlist has no page member])
sg->offset = offset;
sg->length = len;
}
sg->offset = offset;
sg->length = len;
}
+
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+ sg->page = page;
+}
#endif
#define cfs_smp_processor_id() smp_processor_id()
#endif
#define cfs_smp_processor_id() smp_processor_id()
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
- sg->page = virt_to_page(ptr);
- sg->offset = offset_in_page(ptr);
- sg->length = len;
+ sg_set_buf(sg, ptr, len);
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
crypto_hmac_update(tfm, sg, 1);
}
crypto_hmac_update(tfm, sg, 1);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
- src.page = desc->bd_iov[i].kiov_page;
- src.offset = desc->bd_iov[i].kiov_offset;
- src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
- (~(blocksize - 1));
-
- if (adj_nob)
- nob += src.length;
-
- dst.page = desc->bd_enc_iov[i].kiov_page;
- dst.offset = src.offset;
- dst.length = src.length;
+ sg_set_page(&src, desc->bd_iov[i].kiov_page,
+ (desc->bd_iov[i].kiov_len + blocksize - 1) &
+ (~(blocksize - 1)),
+ desc->bd_iov[i].kiov_offset);
+ if (adj_nob)
+ nob += src.length;
+ sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+ src.offset);
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
if (desc->bd_enc_iov[i].kiov_len == 0)
continue;
if (desc->bd_enc_iov[i].kiov_len == 0)
continue;
- src.page = desc->bd_enc_iov[i].kiov_page;
- src.offset = desc->bd_enc_iov[i].kiov_offset;
- src.length = desc->bd_enc_iov[i].kiov_len;
-
- dst = src;
- if (desc->bd_iov[i].kiov_len % blocksize == 0)
- dst.page = desc->bd_iov[i].kiov_page;
+ sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+ desc->bd_enc_iov[i].kiov_len,
+ desc->bd_enc_iov[i].kiov_offset);
+ dst = src;
+ if (desc->bd_iov[i].kiov_len % blocksize == 0)
+ sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
#include "gss_api.h"
#include <linux/crypto.h>
#include "gss_api.h"
#include <linux/crypto.h>
+#include <linux/crc32.h>
/*
* early reply have fixed size, respectively in privacy and integrity mode.
/*
* early reply have fixed size, respectively in privacy and integrity mode.