KVEC descriptors are no longer used or needed;
KIOV descriptors are sufficient for all needs.
This allows us to remove the PTLRPC_BULK_BUF_KVEC
and PTLRPC_BULK_BUF_KIOV flags, as the distinction
no longer exists.
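For illustration, here is the before/after shape of a typical
caller (a minimal sketch based on the hunks below; "req" and
"npages" stand in for whatever the call site already has):

	/* Before: the buffer type had to be spelled out
	 * alongside the operation flags. */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK |
				    PTLRPC_BULK_BUF_KIOV,
				    MDS_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);

	/* After: KIOV is the only buffer type, so the
	 * operation flag alone is sufficient. */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK,
				    MDS_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);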
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: Ic3a6ec942b60a05c7ce6c5b05659700e1399d0b9
Reviewed-on: https://review.whamcloud.com/36971
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Mike Pershin <mpershin@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
PTLRPC_BULK_OP_PASSIVE = 0x00000002,
PTLRPC_BULK_OP_PUT = 0x00000004,
PTLRPC_BULK_OP_GET = 0x00000008,
- PTLRPC_BULK_BUF_KVEC = 0x00000010,
- PTLRPC_BULK_BUF_KIOV = 0x00000020,
PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE;
}
-static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type)
-{
- return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
- == PTLRPC_BULK_BUF_KVEC;
-}
-
-static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
-{
- return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
- == PTLRPC_BULK_BUF_KIOV;
-}
-
static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type)
{
return ((type & PTLRPC_BULK_OP_ACTIVE) |
extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops;
extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops;
-extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops;
/*
* Definition of bulk descriptor.
const struct ptlrpc_bulk_frag_ops
*ops);
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
- void *frag, int len);
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset, int len,
int pin);
ptlrpc_at_set_req_timeout(req);
desc = ptlrpc_prep_bulk_imp(req, npages, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SINK,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL) {
/* allocate bulk transfer descriptor */
desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SINK,
MGS_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
LASSERT(page_count <= nrpages);
desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
- PTLRPC_BULK_PUT_SOURCE |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SOURCE,
MGS_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
if (!desc)
int i;
if (desc != NULL) {
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
page_count = desc->bd_iov_count;
} else {
page_count = aa->aa_page_count;
desc = ptlrpc_prep_bulk_imp(req, page_count,
cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
(opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
- PTLRPC_BULK_PUT_SINK) |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SINK),
OST_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
/* allocate bulk descriptor */
desc = ptlrpc_prep_bulk_imp(req, pages, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SINK,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
ptlrpc_at_set_req_timeout(req);
desc = ptlrpc_prep_bulk_imp(req, npages, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SINK,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
req->rq_bulk_write = 1;
desc = ptlrpc_prep_bulk_imp(req, page_count,
MD_MAX_BRW_SIZE >> LNET_MTU_BITS,
- PTLRPC_BULK_GET_SOURCE | PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_GET_SOURCE,
MDS_BULK_PORTAL, &ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
GOTO(out_req, rc = -ENOMEM);
};
EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
-const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
- .add_iov_frag = ptlrpc_prep_bulk_frag,
-};
-EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
-
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
struct ptlrpc_bulk_desc *desc;
int i;
- /* ensure that only one of KIOV or IOVEC is set but not both */
- LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
- ops->add_kiov_frag != NULL) ||
- (ptlrpc_is_bulk_desc_kvec(type) &&
- ops->add_iov_frag != NULL));
+ LASSERT(ops->add_kiov_frag != NULL);
OBD_ALLOC_PTR(desc);
if (!desc)
return NULL;
- if (type & PTLRPC_BULK_BUF_KIOV) {
- OBD_ALLOC_LARGE(GET_KIOV(desc),
- nfrags * sizeof(*GET_KIOV(desc)));
- if (!GET_KIOV(desc))
- goto out;
- } else {
- OBD_ALLOC_LARGE(GET_KVEC(desc),
- nfrags * sizeof(*GET_KVEC(desc)));
- if (!GET_KVEC(desc))
- goto out;
- }
+
+ OBD_ALLOC_LARGE(GET_KIOV(desc),
+ nfrags * sizeof(*GET_KIOV(desc)));
+ if (!GET_KIOV(desc))
+ goto out;
spin_lock_init(&desc->bd_lock);
init_waitqueue_head(&desc->bd_waitq);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
LASSERT(pageoffset + len <= PAGE_SIZE);
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
- void *frag, int len)
-{
- struct kvec *iovec;
-
- ENTRY;
-
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(frag != NULL);
- LASSERT(len > 0);
- LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
-
- iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
-
- desc->bd_nob += len;
-
- iovec->iov_base = frag;
- iovec->iov_len = len;
-
- desc->bd_iov_count++;
-
- RETURN(desc->bd_nob);
-}
-EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
-
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
ENTRY;
LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
LASSERT(desc->bd_frag_ops != NULL);
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
- sptlrpc_enc_pool_put_pages(desc);
+ sptlrpc_enc_pool_put_pages(desc);
if (desc->bd_export)
class_export_put(desc->bd_export);
if (desc->bd_frag_ops->release_frags != NULL)
desc->bd_frag_ops->release_frags(desc);
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
- OBD_FREE_LARGE(GET_KIOV(desc),
- desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
- else
- OBD_FREE_LARGE(GET_KVEC(desc),
- desc->bd_max_iov * sizeof(*GET_KVEC(desc)));
+ OBD_FREE_LARGE(GET_KIOV(desc),
+ desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
OBD_FREE_PTR(desc);
EXIT;
}
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
LASSERT(gctx->gc_mechctx);
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
case SPTLRPC_SVC_NULL:
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_write);
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read);
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
struct sg_table sg_src, sg_dst;
int blocksize, i, rc, nob = 0;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(desc->bd_iov_count);
LASSERT(GET_ENC_KIOV(desc));
int ct_nob = 0, pt_nob = 0;
int blocksize, i, rc;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(desc->bd_iov_count);
LASSERT(GET_ENC_KIOV(desc));
LASSERT(desc->bd_nob_transferred);
struct krb5_ctx *kctx = gctx->internal_ctx_id;
int blocksize, i;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(desc->bd_iov_count);
LASSERT(GET_ENC_KIOV(desc));
LASSERT(kctx->kc_keye.kb_tfm);
int rc = 0;
u32 major;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(ke);
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
int rc;
__u32 major;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(ke);
if (token->len < sizeof(*khdr)) {
page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
LASSERT(page_count <= rdpg.rp_count);
desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
- PTLRPC_BULK_PUT_SOURCE |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SOURCE,
MGS_BULK_PORTAL, frag_ops);
if (desc == NULL)
GOTO(out, rc = -ENOMEM);
md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
- md->options |= LNET_MD_KIOV;
- if (GET_ENC_KIOV(desc))
- md->start = &BD_GET_ENC_KIOV(desc, mdidx *
- LNET_MAX_IOV);
- else
- md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
- } else if (ptlrpc_is_bulk_desc_kvec(desc->bd_type)) {
- md->options |= LNET_MD_IOVEC;
- if (GET_ENC_KVEC(desc))
- md->start = &BD_GET_ENC_KVEC(desc, mdidx *
- LNET_MAX_IOV);
- else
- md->start = &BD_GET_KVEC(desc, mdidx * LNET_MAX_IOV);
- }
+ md->options |= LNET_MD_KIOV;
+ if (GET_ENC_KIOV(desc))
+ md->start = &BD_GET_ENC_KIOV(desc, mdidx *
+ LNET_MAX_IOV);
+ else
+ md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
}
int p_idx, g_idx;
int i;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(desc->bd_iov_count > 0);
LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
int p_idx, g_idx;
int i;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
if (GET_ENC_KIOV(desc) == NULL)
return;
unsigned int bufsize;
int i, err;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
LASSERT(buflen >= 4);
char *ptr;
unsigned int off, i;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
for (i = 0; i < desc->bd_iov_count; i++) {
if (BD_GET_KIOV(desc, i).kiov_len == 0)
continue;
int rc;
int i, nob;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
/* allocate bulk descriptor */
desc = ptlrpc_prep_bulk_imp(req, npages, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SINK,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
desc = ptlrpc_prep_bulk_exp(pill->rc_req, page_count,
PTLRPC_BULK_OPS_COUNT,
- PTLRPC_BULK_GET_SINK |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_GET_SINK,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
}
desc = ptlrpc_prep_bulk_exp(req, pages, 1,
- PTLRPC_BULK_PUT_SOURCE | PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SOURCE,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
ENTRY;
desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
- PTLRPC_BULK_PUT_SOURCE |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SOURCE,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
desc = NULL;
} else {
desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- PTLRPC_BULK_PUT_SOURCE |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SOURCE,
OST_BULK_PORTAL,
&ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
desc = NULL;
} else {
desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- PTLRPC_BULK_GET_SINK |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_GET_SINK,
OST_BULK_PORTAL,
&ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)