Now that there are no kvecs in ptlrpc_bulk_desc, only kiovs, we can
simplify the access, discarding the containing struct and the macros,
and just accessing the fields directly.
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I068a7a280f130bf0b53b9c572ed47ef0cc999102
Reviewed-on: https://review.whamcloud.com/36973
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
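
For orientation, here is a minimal before/after sketch of the access pattern this patch changes. Only the fields and macros touched by the patch are shown; release_frags_sketch is a hypothetical helper name wrapping the put_page() loop that appears verbatim in one of the hunks below.

	/* Before: the kiov arrays sat in a wrapper struct inside
	 * struct ptlrpc_bulk_desc and were reached through macros. */
	struct {
		/* encrypt iov, size is either 0 or bd_iov_count. */
		lnet_kiov_t *bd_enc_vec;
		lnet_kiov_t *bd_vec;
	} bd_kiov;

	#define GET_KIOV(desc)		((desc)->bd_kiov.bd_vec)
	#define BD_GET_KIOV(desc, i)	((desc)->bd_kiov.bd_vec[i])

	/* After: the wrapper and the macros are gone; the pointers are
	 * direct members of struct ptlrpc_bulk_desc. */
	/* encrypted iov, size is either 0 or bd_iov_count. */
	lnet_kiov_t *bd_enc_vec;
	lnet_kiov_t *bd_vec;

	/* Callers now index the arrays directly, e.g. releasing pages: */
	static void release_frags_sketch(struct ptlrpc_bulk_desc *desc)
	{
		int i;

		for (i = 0; i < desc->bd_iov_count; i++)
			put_page(desc->bd_vec[i].kiov_page);
	}

The remainder of the diff is this substitution applied mechanically: GET_KIOV(desc) becomes desc->bd_vec, BD_GET_KIOV(desc, i) becomes desc->bd_vec[i], and likewise GET_ENC_KIOV/BD_GET_ENC_KIOV become desc->bd_enc_vec accesses.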
/** array of associated MDs */
struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT];
- struct {
- /*
- * encrypt iov, size is either 0 or bd_iov_count.
- */
- lnet_kiov_t *bd_enc_vec;
- lnet_kiov_t *bd_vec;
- } bd_kiov;
+ /* encrypted iov, size is either 0 or bd_iov_count. */
+ lnet_kiov_t *bd_enc_vec;
+ lnet_kiov_t *bd_vec;
-#define GET_KIOV(desc) ((desc)->bd_kiov.bd_vec)
-#define BD_GET_KIOV(desc, i) ((desc)->bd_kiov.bd_vec[i])
-#define GET_ENC_KIOV(desc) ((desc)->bd_kiov.bd_enc_vec)
-#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_kiov.bd_enc_vec[i])
-
enum {
SVC_INIT = 0,
SVC_STOPPED = 1 << 0,
for (i = 0; i < page_count; i++) {
void *pz;
if (desc)
- pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+ pz = page_zone(desc->bd_vec[i].kiov_page);
else
pz = page_zone(aa->aa_ppga[i]->pg);
int i;
for (i = 0; i < desc->bd_iov_count ; i++)
- put_page(BD_GET_KIOV(desc, i).kiov_page);
+ put_page(desc->bd_vec[i].kiov_page);
}
static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
- OBD_ALLOC_LARGE(GET_KIOV(desc),
- nfrags * sizeof(*GET_KIOV(desc)));
- if (!GET_KIOV(desc))
+ OBD_ALLOC_LARGE(desc->bd_vec,
+ nfrags * sizeof(*desc->bd_vec));
+ if (!desc->bd_vec)
goto out;
spin_lock_init(&desc->bd_lock);
LASSERT(len > 0);
LASSERT(pageoffset + len <= PAGE_SIZE);
- kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
+ kiov = &desc->bd_vec[desc->bd_iov_count];
if (desc->bd_frag_ops->release_frags != NULL)
desc->bd_frag_ops->release_frags(desc);
- OBD_FREE_LARGE(GET_KIOV(desc),
- desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
+ OBD_FREE_LARGE(desc->bd_vec,
+ desc->bd_max_iov * sizeof(*desc->bd_vec));
OBD_FREE_PTR(desc);
EXIT;
}
maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
desc->bd_iov_count,
- GET_KIOV(desc),
+ desc->bd_vec,
&token);
if (maj != GSS_S_COMPLETE) {
CWARN("failed to sign bulk data: %x\n", maj);
/* fix the actual data size */
for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (BD_GET_KIOV(desc, i).kiov_len + nob >
+ if (desc->bd_vec[i].kiov_len + nob >
desc->bd_nob_transferred) {
- BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_vec[i].kiov_len =
desc->bd_nob_transferred - nob;
}
- nob += BD_GET_KIOV(desc, i).kiov_len;
+ nob += desc->bd_vec[i].kiov_len;
}
token.data = bsdv->bsd_data;
maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
desc->bd_iov_count,
- GET_KIOV(desc),
+ desc->bd_vec,
&token);
if (maj != GSS_S_COMPLETE) {
CERROR("failed to verify bulk read: %x\n", maj);
maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
desc->bd_iov_count,
- GET_KIOV(desc), &token);
+ desc->bd_vec, &token);
if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed to verify bulk signature: %x\n", maj);
maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
desc->bd_iov_count,
- GET_KIOV(desc), &token);
+ desc->bd_vec, &token);
if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed to sign bulk data: %x\n", maj);
int blocksize, i, rc, nob = 0;
LASSERT(desc->bd_iov_count);
- LASSERT(GET_ENC_KIOV(desc));
+ LASSERT(desc->bd_enc_vec);
blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
sg_init_table(&src, 1);
- sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
- (BD_GET_KIOV(desc, i).kiov_len +
+ sg_set_page(&src, desc->bd_vec[i].kiov_page,
+ (desc->bd_vec[i].kiov_len +
blocksize - 1) &
(~(blocksize - 1)),
- BD_GET_KIOV(desc, i).kiov_offset);
+ desc->bd_vec[i].kiov_offset);
if (adj_nob)
nob += src.length;
sg_init_table(&dst, 1);
- sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+ sg_set_page(&dst, desc->bd_enc_vec[i].kiov_page,
src.length, src.offset);
- BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
- BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
+ desc->bd_enc_vec[i].kiov_offset = dst.offset;
+ desc->bd_enc_vec[i].kiov_len = dst.length;
rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
src.length);
int blocksize, i, rc;
LASSERT(desc->bd_iov_count);
- LASSERT(GET_ENC_KIOV(desc));
+ LASSERT(desc->bd_enc_vec);
LASSERT(desc->bd_nob_transferred);
blocksize = crypto_blkcipher_blocksize(tfm);
for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
i++) {
- if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
+ if (desc->bd_enc_vec[i].kiov_offset % blocksize
!= 0 ||
- BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
+ desc->bd_enc_vec[i].kiov_len % blocksize
!= 0) {
CERROR("page %d: odd offset %u len %u, blocksize %d\n",
- i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
- BD_GET_ENC_KIOV(desc, i).kiov_len,
+ i, desc->bd_enc_vec[i].kiov_offset,
+ desc->bd_enc_vec[i].kiov_len,
blocksize);
return -EFAULT;
}
if (adj_nob) {
- if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+ if (ct_nob + desc->bd_enc_vec[i].kiov_len >
desc->bd_nob_transferred)
- BD_GET_ENC_KIOV(desc, i).kiov_len =
+ desc->bd_enc_vec[i].kiov_len =
desc->bd_nob_transferred - ct_nob;
- BD_GET_KIOV(desc, i).kiov_len =
- BD_GET_ENC_KIOV(desc, i).kiov_len;
- if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+ desc->bd_vec[i].kiov_len =
+ desc->bd_enc_vec[i].kiov_len;
+ if (pt_nob + desc->bd_enc_vec[i].kiov_len >
desc->bd_nob)
- BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_vec[i].kiov_len =
desc->bd_nob - pt_nob;
} else {
/* this should be guaranteed by LNET */
- LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
+ LASSERT(ct_nob + desc->bd_enc_vec[i].
kiov_len <=
desc->bd_nob_transferred);
- LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
- BD_GET_ENC_KIOV(desc, i).kiov_len);
+ LASSERT(desc->bd_vec[i].kiov_len <=
+ desc->bd_enc_vec[i].kiov_len);
- if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+ if (desc->bd_enc_vec[i].kiov_len == 0)
continue;
sg_init_table(&src, 1);
- sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
- BD_GET_ENC_KIOV(desc, i).kiov_len,
- BD_GET_ENC_KIOV(desc, i).kiov_offset);
+ sg_set_page(&src, desc->bd_enc_vec[i].kiov_page,
+ desc->bd_enc_vec[i].kiov_len,
+ desc->bd_enc_vec[i].kiov_offset);
dst = src;
- if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+ if (desc->bd_vec[i].kiov_len % blocksize == 0)
sg_assign_page(&dst,
- BD_GET_KIOV(desc, i).kiov_page);
+ desc->bd_vec[i].kiov_page);
rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
- if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
- memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
- BD_GET_KIOV(desc, i).kiov_offset,
- page_address(BD_GET_ENC_KIOV(desc, i).
+ if (desc->bd_vec[i].kiov_len % blocksize != 0) {
+ memcpy(page_address(desc->bd_vec[i].kiov_page) +
+ desc->bd_vec[i].kiov_offset,
+ page_address(desc->bd_enc_vec[i].
kiov_page) +
- BD_GET_KIOV(desc, i).kiov_offset,
- BD_GET_KIOV(desc, i).kiov_len);
+ desc->bd_vec[i].kiov_offset,
+ desc->bd_vec[i].kiov_len);
- ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
- pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+ ct_nob += desc->bd_enc_vec[i].kiov_len;
+ pt_nob += desc->bd_vec[i].kiov_len;
}
if (unlikely(ct_nob != desc->bd_nob_transferred)) {
/* if needed, clear up the rest unused iovs */
if (adj_nob)
while (i < desc->bd_iov_count)
- BD_GET_KIOV(desc, i++).kiov_len = 0;
+ desc->bd_vec[i++].kiov_len = 0;
/* decrypt tail (krb5 header) */
rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
int blocksize, i;
LASSERT(desc->bd_iov_count);
- LASSERT(GET_ENC_KIOV(desc));
+ LASSERT(desc->bd_enc_vec);
LASSERT(kctx->kc_keye.kb_tfm);
blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+ LASSERT(desc->bd_enc_vec[i].kiov_page);
/*
* offset should always start at page boundary of either
* client or server side.
*/
- if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+ if (desc->bd_vec[i].kiov_offset & blocksize) {
CERROR("odd offset %d in page %d\n",
CERROR("odd offset %d in page %d\n",
- BD_GET_KIOV(desc, i).kiov_offset, i);
+ desc->bd_vec[i].kiov_offset, i);
- BD_GET_ENC_KIOV(desc, i).kiov_offset =
- BD_GET_KIOV(desc, i).kiov_offset;
- BD_GET_ENC_KIOV(desc, i).kiov_len =
- (BD_GET_KIOV(desc, i).kiov_len +
+ desc->bd_enc_vec[i].kiov_offset =
+ desc->bd_vec[i].kiov_offset;
+ desc->bd_enc_vec[i].kiov_len =
+ (desc->bd_vec[i].kiov_len +
blocksize - 1) & (~(blocksize - 1));
}
/* compute checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 1, data_desc,
- desc->bd_iov_count, GET_KIOV(desc),
+ desc->bd_iov_count, desc->bd_vec,
&cksum, gctx->hash_func))
GOTO(out_free_cksum, major = GSS_S_FAILURE);
LASSERT(cksum.len >= ke->ke_hash_size);
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 1, data_desc,
desc->bd_iov_count,
- GET_KIOV(desc),
+ desc->bd_vec,
&cksum, gctx->hash_func))
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
- if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+ if (desc->bd_vec[i].kiov_offset & blocksize) {
CERROR("offset %d not blocksize aligned\n",
CERROR("offset %d not blocksize aligned\n",
- BD_GET_KIOV(desc, i).kiov_offset);
+ desc->bd_vec[i].kiov_offset);
- BD_GET_ENC_KIOV(desc, i).kiov_offset =
- BD_GET_KIOV(desc, i).kiov_offset;
- BD_GET_ENC_KIOV(desc, i).kiov_len =
- sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize);
+ desc->bd_enc_vec[i].kiov_offset =
+ desc->bd_vec[i].kiov_offset;
+ desc->bd_enc_vec[i].kiov_len =
+ sk_block_mask(desc->bd_vec[i].kiov_len, blocksize);
sg_init_table(&ctxt, 1);
for (i = 0; i < desc->bd_iov_count; i++) {
- sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
- sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
+ sg_set_page(&ptxt, desc->bd_vec[i].kiov_page,
+ sk_block_mask(desc->bd_vec[i].kiov_len,
blocksize),
- BD_GET_KIOV(desc, i).kiov_offset);
+ desc->bd_vec[i].kiov_offset);
- sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
+ sg_set_page(&ctxt, desc->bd_enc_vec[i].kiov_page,
ptxt.length, ptxt.offset);
- BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
- BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;
+ desc->bd_enc_vec[i].kiov_offset = ctxt.offset;
+ desc->bd_enc_vec[i].kiov_len = ctxt.length;
rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
ptxt.length);
for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
i++) {
- lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
- lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);
+ lnet_kiov_t *piov = &desc->bd_vec[i];
+ lnet_kiov_t *ciov = &desc->bd_enc_vec[i];
if (ciov->kiov_offset % blocksize != 0 ||
ciov->kiov_len % blocksize != 0) {
/* if needed, clear up the rest unused iovs */
if (adj_nob)
while (i < desc->bd_iov_count)
- BD_GET_KIOV(desc, i++).kiov_len = 0;
+ desc->bd_vec[i++].kiov_len = 0;
if (unlikely(cnob != desc->bd_nob_transferred)) {
CERROR("%d cipher text transferred but only %d decrypted\n",
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
skw.skw_hmac.len = sht_bytes;
if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
- desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac,
+ desc->bd_iov_count, desc->bd_enc_vec, &skw.skw_hmac,
gss_context->hash_func))
return GSS_S_FAILURE;
rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
&skw.skw_cipher, desc->bd_iov_count,
- GET_ENC_KIOV(desc), desc->bd_nob,
+ desc->bd_enc_vec, desc->bd_nob,
&skw.skw_hmac);
if (rc)
return rc;
md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
md->options |= LNET_MD_KIOV;
- if (GET_ENC_KIOV(desc))
- md->start = &BD_GET_ENC_KIOV(desc, mdidx *
- LNET_MAX_IOV);
+ if (desc->bd_enc_vec)
+ md->start = &desc->bd_enc_vec[mdidx *
+ LNET_MAX_IOV];
else
- md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
+ md->start = &desc->bd_vec[mdidx * LNET_MAX_IOV];
LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
/* resent bulk, enc iov might have been allocated previously */
- if (GET_ENC_KIOV(desc) != NULL)
+ if (desc->bd_enc_vec != NULL)
return 0;
- OBD_ALLOC_LARGE(GET_ENC_KIOV(desc),
- desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
- if (GET_ENC_KIOV(desc) == NULL)
+ OBD_ALLOC_LARGE(desc->bd_enc_vec,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
+ if (desc->bd_enc_vec == NULL)
return -ENOMEM;
spin_lock(&page_pools.epp_lock);
*/
page_pools.epp_st_outofmem++;
spin_unlock(&page_pools.epp_lock);
- OBD_FREE_LARGE(GET_ENC_KIOV(desc),
+ OBD_FREE_LARGE(desc->bd_enc_vec,
desc->bd_iov_count *
- sizeof(*GET_ENC_KIOV(desc)));
- GET_ENC_KIOV(desc) = NULL;
+ sizeof(*desc->bd_enc_vec));
+ desc->bd_enc_vec = NULL;
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
- BD_GET_ENC_KIOV(desc, i).kiov_page =
+ desc->bd_enc_vec[i].kiov_page =
page_pools.epp_pools[p_idx][g_idx];
page_pools.epp_pools[p_idx][g_idx] = NULL;
- if (GET_ENC_KIOV(desc) == NULL)
+ if (desc->bd_enc_vec == NULL)
return;
LASSERT(desc->bd_iov_count > 0);
LASSERT(page_pools.epp_pools[p_idx]);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page != NULL);
+ LASSERT(desc->bd_enc_vec[i].kiov_page != NULL);
LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
page_pools.epp_pools[p_idx][g_idx] =
- BD_GET_ENC_KIOV(desc, i).kiov_page;
+ desc->bd_enc_vec[i].kiov_page;
if (++g_idx == PAGES_PER_POOL) {
p_idx++;
spin_unlock(&page_pools.epp_lock);
- OBD_FREE_LARGE(GET_ENC_KIOV(desc),
- desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
- GET_ENC_KIOV(desc) = NULL;
+ OBD_FREE_LARGE(desc->bd_enc_vec,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
+ desc->bd_enc_vec = NULL;
for (i = 0; i < desc->bd_iov_count; i++) {
cfs_crypto_hash_update_page(req,
- BD_GET_KIOV(desc, i).kiov_page,
- BD_GET_KIOV(desc, i).kiov_offset &
+ desc->bd_vec[i].kiov_page,
+ desc->bd_vec[i].kiov_offset &
~PAGE_MASK,
- BD_GET_KIOV(desc, i).kiov_len);
+ desc->bd_vec[i].kiov_len);
}
if (hashsize > buflen) {
unsigned int off, i;
for (i = 0; i < desc->bd_iov_count; i++) {
- if (BD_GET_KIOV(desc, i).kiov_len == 0)
+ if (desc->bd_vec[i].kiov_len == 0)
continue;
- ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
- off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
+ ptr = kmap(desc->bd_vec[i].kiov_page);
+ off = desc->bd_vec[i].kiov_offset & ~PAGE_MASK;
- kunmap(BD_GET_KIOV(desc, i).kiov_page);
+ kunmap(desc->bd_vec[i].kiov_page);
/* fix the actual data size */
for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (BD_GET_KIOV(desc, i).kiov_len +
+ if (desc->bd_vec[i].kiov_len +
nob > desc->bd_nob_transferred) {
- BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_vec[i].kiov_len =
desc->bd_nob_transferred - nob;
}
- nob += BD_GET_KIOV(desc, i).kiov_len;
+ nob += desc->bd_vec[i].kiov_len;
}
rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,