struct gss_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs,
+ int iovcnt, struct bio_vec *iovs,
rawobj_t *cksum,
digest_hash hash_func)
{
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
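For context on the rename itself: lnet_kiov_t and struct bio_vec describe the same (page, length, offset) triple, so the conversion in these prototypes is a one-for-one field mapping. A minimal standalone sketch of that mapping, not part of the patch; the lnet_kiov_t layout quoted in the comment is assumed from the pre-patch LNet headers:

#include <linux/bvec.h>	/* struct bio_vec: bv_page, bv_len, bv_offset */

/*
 * Assumed pre-patch LNet layout, shown for comparison only:
 *
 *	typedef struct {
 *		struct page	*kiov_page;	-> bv_page
 *		unsigned int	 kiov_len;	-> bv_len
 *		unsigned int	 kiov_offset;	-> bv_offset
 *	} lnet_kiov_t;
 *
 * Hypothetical helper showing how a kiov-style triple fills a bio_vec.
 */
static inline void kiov_to_bvec(struct bio_vec *bv, struct page *page,
				unsigned int len, unsigned int offset)
{
	bv->bv_page   = page;	/* was kiov_page   */
	bv->bv_len    = len;	/* was kiov_len    */
	bv->bv_offset = offset;	/* was kiov_offset */
}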
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
sg_init_table(&src, 1);
- sg_set_page(&src, desc->bd_vec[i].kiov_page,
- (desc->bd_vec[i].kiov_len +
+ sg_set_page(&src, desc->bd_vec[i].bv_page,
+ (desc->bd_vec[i].bv_len +
blocksize - 1) &
(~(blocksize - 1)),
- desc->bd_vec[i].kiov_offset);
+ desc->bd_vec[i].bv_offset);
if (adj_nob)
nob += src.length;
sg_init_table(&dst, 1);
- sg_set_page(&dst, desc->bd_enc_vec[i].kiov_page,
+ sg_set_page(&dst, desc->bd_enc_vec[i].bv_page,
src.length, src.offset);
- desc->bd_enc_vec[i].kiov_offset = dst.offset;
- desc->bd_enc_vec[i].kiov_len = dst.length;
+ desc->bd_enc_vec[i].bv_offset = dst.offset;
+ desc->bd_enc_vec[i].bv_len = dst.length;
rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
src.length);
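The length handed to sg_set_page() above is the clear-text length rounded up to the cipher blocksize, and the rounded offset/length are then mirrored into bd_enc_vec[]. A standalone userspace sketch of just that arithmetic, with hypothetical names (the mask trick requires blocksize to be a power of two):

#include <assert.h>
#include <stdio.h>

/* Round len up to the next multiple of blocksize (power of two). */
static unsigned int blk_round_up(unsigned int len, unsigned int blocksize)
{
	assert((blocksize & (blocksize - 1)) == 0);
	return (len + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
	unsigned int blocksize = 16;		/* e.g. AES-CBC block size */
	unsigned int clear_len[] = { 4096, 1000, 1 };
	unsigned int nob = 0;			/* total cipher bytes, cf. adj_nob */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int cipher_len = blk_round_up(clear_len[i], blocksize);

		nob += cipher_len;
		printf("page %d: clear %4u -> cipher %4u\n",
		       i, clear_len[i], cipher_len);
	}
	printf("total cipher bytes: %u\n", nob);	/* 4096 + 1008 + 16 = 5120 */
	return 0;
}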
 * desc->bd_nob_transferred is the size of the cipher text received.
 * desc->bd_nob is the expected size of the plain text.
*
- * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * if adj_nob != 0, we adjust each page's bv_len to the actual
* plain text size.
 * - for client read: we don't know the data size for each page, so
- * bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
+ * bd_iov[]->bv_len is set to PAGE_SIZE, but actual data received might
* be smaller, so we need to adjust it according to
- * bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
+ * bd_u.bd_kiov.bd_enc_vec[]->bv_len.
 * this means we DO NOT support the case where the server sends odd-size
 * data in a page that is not the last one.
 * - for server write: we know exactly the data size expected for each page,
- * thus kiov_len is accurate already, so we should not adjust it at all.
- * and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
- * round_up(bd_iov[]->kiov_len) which
+ * thus bv_len is accurate already, so we should not adjust it at all.
+ * and bd_u.bd_kiov.bd_enc_vec[]->bv_len should be
+ * round_up(bd_iov[]->bv_len) which
* should have been done by prep_bulk().
*/
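A standalone userspace sketch of the client-read adjustment described above, with hypothetical names and sizes (simplified): each page's plain-text length starts at PAGE_SIZE, is trimmed to the cipher-text length actually received for that page, and the last page is trimmed again so the plain-text total does not exceed bd_nob:

#include <stdio.h>

#define NPAGES		3
#define PAGE_SZ		4096u

int main(void)
{
	/* cipher-text bytes received per page (already blocksize-rounded) */
	unsigned int enc_len[NPAGES] = { 4096, 4096, 1024 };
	unsigned int plain_len[NPAGES];
	unsigned int bd_nob = 9000;	/* expected plain-text total */
	unsigned int ct_nob = 0, pt_nob = 0;
	int i;

	for (i = 0; i < NPAGES; i++) {
		/* client read: bv_len was preset to PAGE_SIZE ... */
		plain_len[i] = PAGE_SZ;
		/* ... then trimmed to what was actually received ... */
		if (plain_len[i] > enc_len[i])
			plain_len[i] = enc_len[i];
		/* ... and the last page trimmed to the plain-text total */
		if (pt_nob + plain_len[i] > bd_nob)
			plain_len[i] = bd_nob - pt_nob;

		ct_nob += enc_len[i];
		pt_nob += plain_len[i];
		printf("page %d: cipher %u -> plain %u\n",
		       i, enc_len[i], plain_len[i]);
	}
	printf("ct_nob %u, pt_nob %u\n", ct_nob, pt_nob);
	return 0;
}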
static
for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
i++) {
-		if (desc->bd_enc_vec[i].kiov_offset % blocksize
-		    != 0 ||
-		    desc->bd_enc_vec[i].kiov_len % blocksize
-		    != 0) {
+		if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 ||
+		    desc->bd_enc_vec[i].bv_len % blocksize != 0) {
CERROR("page %d: odd offset %u len %u, blocksize %d\n",
- i, desc->bd_enc_vec[i].kiov_offset,
- desc->bd_enc_vec[i].kiov_len,
+ i, desc->bd_enc_vec[i].bv_offset,
+ desc->bd_enc_vec[i].bv_len,
blocksize);
return -EFAULT;
}
if (adj_nob) {
- if (ct_nob + desc->bd_enc_vec[i].kiov_len >
+ if (ct_nob + desc->bd_enc_vec[i].bv_len >
desc->bd_nob_transferred)
- desc->bd_enc_vec[i].kiov_len =
+ desc->bd_enc_vec[i].bv_len =
desc->bd_nob_transferred - ct_nob;
- desc->bd_vec[i].kiov_len =
- desc->bd_enc_vec[i].kiov_len;
- if (pt_nob + desc->bd_enc_vec[i].kiov_len >
+ desc->bd_vec[i].bv_len =
+ desc->bd_enc_vec[i].bv_len;
+ if (pt_nob + desc->bd_enc_vec[i].bv_len >
desc->bd_nob)
- desc->bd_vec[i].kiov_len =
+ desc->bd_vec[i].bv_len =
desc->bd_nob - pt_nob;
} else {
/* this should be guaranteed by LNET */
-			LASSERT(ct_nob + desc->bd_enc_vec[i].
-				kiov_len <=
+			LASSERT(ct_nob + desc->bd_enc_vec[i].bv_len <=
				desc->bd_nob_transferred);
- LASSERT(desc->bd_vec[i].kiov_len <=
- desc->bd_enc_vec[i].kiov_len);
+ LASSERT(desc->bd_vec[i].bv_len <=
+ desc->bd_enc_vec[i].bv_len);
}
- if (desc->bd_enc_vec[i].kiov_len == 0)
+ if (desc->bd_enc_vec[i].bv_len == 0)
continue;
sg_init_table(&src, 1);
- sg_set_page(&src, desc->bd_enc_vec[i].kiov_page,
- desc->bd_enc_vec[i].kiov_len,
- desc->bd_enc_vec[i].kiov_offset);
+ sg_set_page(&src, desc->bd_enc_vec[i].bv_page,
+ desc->bd_enc_vec[i].bv_len,
+ desc->bd_enc_vec[i].bv_offset);
dst = src;
- if (desc->bd_vec[i].kiov_len % blocksize == 0)
+ if (desc->bd_vec[i].bv_len % blocksize == 0)
-			sg_assign_page(&dst,
-				       desc->bd_vec[i].kiov_page);
+			sg_assign_page(&dst, desc->bd_vec[i].bv_page);
rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
return rc;
}
-		if (desc->bd_vec[i].kiov_len % blocksize != 0) {
-			memcpy(page_address(desc->bd_vec[i].kiov_page) +
-			       desc->bd_vec[i].kiov_offset,
-			       page_address(desc->bd_enc_vec[i].
-						kiov_page) +
-			       desc->bd_vec[i].kiov_offset,
-			       desc->bd_vec[i].kiov_len);
+		if (desc->bd_vec[i].bv_len % blocksize != 0) {
+			memcpy(page_address(desc->bd_vec[i].bv_page) +
+			       desc->bd_vec[i].bv_offset,
+			       page_address(desc->bd_enc_vec[i].bv_page) +
+			       desc->bd_vec[i].bv_offset,
+			       desc->bd_vec[i].bv_len);
}
- ct_nob += desc->bd_enc_vec[i].kiov_len;
- pt_nob += desc->bd_vec[i].kiov_len;
+ ct_nob += desc->bd_enc_vec[i].bv_len;
+ pt_nob += desc->bd_vec[i].bv_len;
}
if (unlikely(ct_nob != desc->bd_nob_transferred)) {
/* if needed, clear up the rest unused iovs */
if (adj_nob)
while (i < desc->bd_iov_count)
- desc->bd_vec[i++].kiov_len = 0;
+ desc->bd_vec[i++].bv_len = 0;
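In the decryption loop above, a page whose plain-text length is a whole number of blocks is decrypted directly into the plain page (the sg_assign_page() path); an odd-sized final page is instead decrypted inside the bounce (bd_enc_vec) page and only bv_len bytes are copied back, which is what the trailing memcpy() does. A standalone userspace sketch of that copy-back pattern, with a dummy cipher and hypothetical names:

#include <stdio.h>
#include <string.h>

#define BLOCKSIZE	16u

/* Stand-in for the real block cipher: "decrypts" n bytes from src into dst
 * (n must be a whole number of blocks); src == dst works in place here. */
static void fake_decrypt(unsigned char *dst, const unsigned char *src,
			 unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		dst[i] = src[i] ^ 0xff;
}

int main(void)
{
	unsigned char enc_page[32];		/* bounce page, blocksize-rounded */
	unsigned char plain_page[32] = { 0 };
	unsigned int bv_len = 20;		/* real plain-text bytes, odd size */
	unsigned int enc_len = 32;		/* bv_len rounded up to BLOCKSIZE */

	memset(enc_page, 'x' ^ 0xff, sizeof(enc_page));

	if (bv_len % BLOCKSIZE == 0) {
		/* aligned page: decrypt straight into the plain page */
		fake_decrypt(plain_page, enc_page, enc_len);
	} else {
		/* odd tail: decrypt within the bounce page, then copy back
		 * only the bv_len bytes of real plain text */
		fake_decrypt(enc_page, enc_page, enc_len);
		memcpy(plain_page, enc_page, bv_len);
	}

	printf("plain page now holds %u 'x' bytes\n", bv_len);
	return 0;
}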
/* decrypt tail (krb5 header) */
rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_vec[i].kiov_page);
+ LASSERT(desc->bd_enc_vec[i].bv_page);
/*
* offset should always start at page boundary of either
* client or server side.
*/
- if (desc->bd_vec[i].kiov_offset & blocksize) {
+ if (desc->bd_vec[i].bv_offset & blocksize) {
CERROR("odd offset %d in page %d\n",
- desc->bd_vec[i].kiov_offset, i);
+ desc->bd_vec[i].bv_offset, i);
return GSS_S_FAILURE;
}
- desc->bd_enc_vec[i].kiov_offset =
- desc->bd_vec[i].kiov_offset;
- desc->bd_enc_vec[i].kiov_len =
- (desc->bd_vec[i].kiov_len +
+ desc->bd_enc_vec[i].bv_offset =
+ desc->bd_vec[i].bv_offset;
+ desc->bd_enc_vec[i].bv_len =
+ (desc->bd_vec[i].bv_len +
blocksize - 1) & (~(blocksize - 1));
}
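The prep step shown here fixes the cipher-text layout before any data moves: each bounce page inherits the plain page's offset (which must be compatible with the blocksize) and gets its length rounded up to whole blocks, so the alignment checks on the decrypt side can rely on it. A standalone userspace sketch over a tiny two-page descriptor, with hypothetical names (the alignment test is written as a plain modulo here for clarity):

#include <stdio.h>

#define BLOCKSIZE	16u
#define NPAGES		2

struct page_desc {			/* simplified stand-in for one bio_vec */
	unsigned int offset;
	unsigned int len;
};

int main(void)
{
	struct page_desc plain[NPAGES] = { { 0, 4096 }, { 0, 300 } };
	struct page_desc enc[NPAGES];
	int i;

	for (i = 0; i < NPAGES; i++) {
		/* offsets must be blocksize aligned or the decrypt side bails */
		if (plain[i].offset % BLOCKSIZE != 0) {
			fprintf(stderr, "odd offset %u in page %d\n",
				plain[i].offset, i);
			return 1;
		}
		/* mirror the offset, round the length up to whole blocks */
		enc[i].offset = plain[i].offset;
		enc[i].len = (plain[i].len + BLOCKSIZE - 1) & ~(BLOCKSIZE - 1);
		printf("page %d: plain %u -> cipher %u\n",
		       i, plain[i].len, enc[i].len);
	}
	return 0;
}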