From 8151daa2c8d97917b37b9271c5d6936a75b48328 Mon Sep 17 00:00:00 2001
From: Mr NeilBrown
Date: Thu, 21 Nov 2019 15:05:56 +1100
Subject: [PATCH] LU-13004 lustre: remove support for KVEC bulk descriptors

KVEC descriptors are no longer used or needed; KIOV descriptors are
sufficient for all needs.  This allows us to remove the
PTLRPC_BULK_BUF_KVEC and PTLRPC_BULK_BUF_KIOV flags, since the
distinction no longer exists.

Signed-off-by: Mr NeilBrown
Change-Id: Ic3a6ec942b60a05c7ce6c5b05659700e1399d0b9
Reviewed-on: https://review.whamcloud.com/36971
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: James Simmons
Reviewed-by: Shaun Tancheff
Reviewed-by: Mike Pershin
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre_net.h       | 17 -----------
 lustre/mdc/mdc_request.c          |  2 +-
 lustre/mgc/mgc_request.c          |  2 +-
 lustre/mgs/mgs_nids.c             |  3 +-
 lustre/osc/osc_page.c             |  1 -
 lustre/osc/osc_request.c          |  3 +-
 lustre/osp/osp_md_object.c        |  2 +-
 lustre/osp/osp_object.c           |  2 +-
 lustre/osp/osp_trans.c            |  2 +-
 lustre/ptlrpc/client.c            | 64 ++++++---------------------------
 lustre/ptlrpc/gss/gss_bulk.c      |  4 ---
 lustre/ptlrpc/gss/gss_krb5_mech.c |  5 ---
 lustre/ptlrpc/nodemap_storage.c   |  3 +-
 lustre/ptlrpc/pers.c              | 21 ++++---------
 lustre/ptlrpc/sec_bulk.c          |  4 ---
 lustre/ptlrpc/sec_plain.c         |  3 --
 lustre/quota/qsd_request.c        |  2 +-
 lustre/target/out_handler.c       |  3 +-
 lustre/target/tgt_handler.c       | 11 +++----
 19 files changed, 29 insertions(+), 125 deletions(-)

diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index fe762ce..2e171a7 100644
--- a/lustre/include/lustre_net.h
+++ b/lustre/include/lustre_net.h
@@ -1333,8 +1333,6 @@ enum ptlrpc_bulk_op_type {
 	PTLRPC_BULK_OP_PASSIVE = 0x00000002,
 	PTLRPC_BULK_OP_PUT = 0x00000004,
 	PTLRPC_BULK_OP_GET = 0x00000008,
-	PTLRPC_BULK_BUF_KVEC = 0x00000010,
-	PTLRPC_BULK_BUF_KIOV = 0x00000020,
 	PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
 	PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
 	PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
@@ -1366,18 +1364,6 @@ static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type)
 	return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE;
 }
 
-static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type)
-{
-	return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
-		== PTLRPC_BULK_BUF_KVEC;
-}
-
-static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
-{
-	return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
-		== PTLRPC_BULK_BUF_KIOV;
-}
-
 static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type)
 {
 	return ((type & PTLRPC_BULK_OP_ACTIVE) |
@@ -1417,7 +1403,6 @@ struct ptlrpc_bulk_frag_ops {
 
 extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops;
 extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops;
-extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops;
 
 /*
  * Definition of bulk descriptor.
@@ -2150,8 +2135,6 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
 					      const struct ptlrpc_bulk_frag_ops
 						*ops);
 
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
-			  void *frag, int len);
 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 			     struct page *page, int pageoffset,
 			     int len, int pin);
diff --git a/lustre/mdc/mdc_request.c b/lustre/mdc/mdc_request.c
index cc71e1b..6c7163f 100644
--- a/lustre/mdc/mdc_request.c
+++ b/lustre/mdc/mdc_request.c
@@ -1060,7 +1060,7 @@ restart_bulk:
 	ptlrpc_at_set_req_timeout(req);
 
 	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
-				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SINK,
 				    MDS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_pin_ops);
 	if (desc == NULL) {
diff --git a/lustre/mgc/mgc_request.c b/lustre/mgc/mgc_request.c
index 077f740..b042c7f 100644
--- a/lustre/mgc/mgc_request.c
+++ b/lustre/mgc/mgc_request.c
@@ -1714,7 +1714,7 @@ again:
 
 	/* allocate bulk transfer descriptor */
 	desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
-				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SINK,
 				    MGS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_pin_ops);
 	if (desc == NULL)
diff --git a/lustre/mgs/mgs_nids.c b/lustre/mgs/mgs_nids.c
index 1e19589..5b34d14 100644
--- a/lustre/mgs/mgs_nids.c
+++ b/lustre/mgs/mgs_nids.c
@@ -655,8 +655,7 @@ int mgs_get_ir_logs(struct ptlrpc_request *req)
 	page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	LASSERT(page_count <= nrpages);
 	desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
-				    PTLRPC_BULK_PUT_SOURCE |
-				    PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SOURCE,
 				    MGS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_pin_ops);
 	if (!desc)
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index acce0ed..9dbad53 100644
--- a/lustre/osc/osc_page.c
+++ b/lustre/osc/osc_page.c
@@ -885,7 +885,6 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
 	int i;
 
 	if (desc != NULL) {
-		LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 		page_count = desc->bd_iov_count;
 	} else {
 		page_count = aa->aa_page_count;
diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index bd28d27..63532c0 100644
--- a/lustre/osc/osc_request.c
+++ b/lustre/osc/osc_request.c
@@ -1360,8 +1360,7 @@ osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
 	desc = ptlrpc_prep_bulk_imp(req, page_count,
 		cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
 		(opc == OST_WRITE ?
 		 PTLRPC_BULK_GET_SOURCE :
-		 PTLRPC_BULK_PUT_SINK) |
-		 PTLRPC_BULK_BUF_KIOV,
+		 PTLRPC_BULK_PUT_SINK),
 		OST_BULK_PORTAL,
 		&ptlrpc_bulk_kiov_pin_ops);
diff --git a/lustre/osp/osp_md_object.c b/lustre/osp/osp_md_object.c
index 37ce566..61d0e82 100644
--- a/lustre/osp/osp_md_object.c
+++ b/lustre/osp/osp_md_object.c
@@ -1240,7 +1240,7 @@ static ssize_t osp_md_read(const struct lu_env *env, struct dt_object *dt,
 
 	/* allocate bulk descriptor */
 	desc = ptlrpc_prep_bulk_imp(req, pages, 1,
-				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SINK,
 				    MDS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_nopin_ops);
 	if (desc == NULL)
diff --git a/lustre/osp/osp_object.c b/lustre/osp/osp_object.c
index 336eb54..90a1905 100644
--- a/lustre/osp/osp_object.c
+++ b/lustre/osp/osp_object.c
@@ -1844,7 +1844,7 @@ static int osp_it_fetch(const struct lu_env *env, struct osp_it *it)
 	ptlrpc_at_set_req_timeout(req);
 
 	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
-				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SINK,
 				    MDS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_pin_ops);
 	if (desc == NULL)
diff --git a/lustre/osp/osp_trans.c b/lustre/osp/osp_trans.c
index aa5ba5d..f156b3a 100644
--- a/lustre/osp/osp_trans.c
+++ b/lustre/osp/osp_trans.c
@@ -426,7 +426,7 @@ int osp_prep_update_req(const struct lu_env *env, struct obd_import *imp,
 	req->rq_bulk_write = 1;
 	desc = ptlrpc_prep_bulk_imp(req, page_count,
 		MD_MAX_BRW_SIZE >> LNET_MTU_BITS,
-		PTLRPC_BULK_GET_SOURCE | PTLRPC_BULK_BUF_KIOV,
+		PTLRPC_BULK_GET_SOURCE,
 		MDS_BULK_PORTAL, &ptlrpc_bulk_kiov_nopin_ops);
 	if (desc == NULL)
 		GOTO(out_req, rc = -ENOMEM);
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index aa4c9b0..740019a 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -104,11 +104,6 @@ const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
 };
 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
 
-const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
-	.add_iov_frag = ptlrpc_prep_bulk_frag,
-};
-EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
-
 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
 static int ptlrpcd_check_work(struct ptlrpc_request *req);
 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
@@ -172,26 +167,16 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
 	struct ptlrpc_bulk_desc *desc;
 	int i;
 
-	/* ensure that only one of KIOV or IOVEC is set but not both */
-	LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
-		 ops->add_kiov_frag != NULL) ||
-		(ptlrpc_is_bulk_desc_kvec(type) &&
-		 ops->add_iov_frag != NULL));
+	LASSERT(ops->add_kiov_frag != NULL);
 
 	OBD_ALLOC_PTR(desc);
 	if (!desc)
 		return NULL;
-	if (type & PTLRPC_BULK_BUF_KIOV) {
-		OBD_ALLOC_LARGE(GET_KIOV(desc),
-				nfrags * sizeof(*GET_KIOV(desc)));
-		if (!GET_KIOV(desc))
-			goto out;
-	} else {
-		OBD_ALLOC_LARGE(GET_KVEC(desc),
-				nfrags * sizeof(*GET_KVEC(desc)));
-		if (!GET_KVEC(desc))
-			goto out;
-	}
+
+	OBD_ALLOC_LARGE(GET_KIOV(desc),
+			nfrags * sizeof(*GET_KIOV(desc)));
+	if (!GET_KIOV(desc))
+		goto out;
 
 	spin_lock_init(&desc->bd_lock);
 	init_waitqueue_head(&desc->bd_waitq);
@@ -265,7 +250,6 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 	LASSERT(pageoffset >= 0);
 	LASSERT(len > 0);
 	LASSERT(pageoffset + len <= PAGE_SIZE);
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
 	kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
 
@@ -282,31 +266,6 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 }
 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
 
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
-			  void *frag, int len)
-{
-	struct kvec *iovec;
-
-	ENTRY;
-
-	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
-	LASSERT(frag != NULL);
-	LASSERT(len > 0);
-	LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
-
-	iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
-
-	desc->bd_nob += len;
-
-	iovec->iov_base = frag;
-	iovec->iov_len = len;
-
-	desc->bd_iov_count++;
-
-	RETURN(desc->bd_nob);
-}
-EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
-
 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 {
 	ENTRY;
@@ -317,8 +276,7 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 	LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
 	LASSERT(desc->bd_frag_ops != NULL);
 
-	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
-		sptlrpc_enc_pool_put_pages(desc);
+	sptlrpc_enc_pool_put_pages(desc);
 
 	if (desc->bd_export)
 		class_export_put(desc->bd_export);
@@ -328,12 +286,8 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 	if (desc->bd_frag_ops->release_frags != NULL)
 		desc->bd_frag_ops->release_frags(desc);
 
-	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
-		OBD_FREE_LARGE(GET_KIOV(desc),
-			       desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
-	else
-		OBD_FREE_LARGE(GET_KVEC(desc),
-			       desc->bd_max_iov * sizeof(*GET_KVEC(desc)));
+	OBD_FREE_LARGE(GET_KIOV(desc),
+		       desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
 	OBD_FREE_PTR(desc);
 	EXIT;
 }
diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c
index 041dd12..b418ea7 100644
--- a/lustre/ptlrpc/gss/gss_bulk.c
+++ b/lustre/ptlrpc/gss/gss_bulk.c
@@ -69,7 +69,6 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
 	LASSERT(req->rq_pack_bulk);
 	LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
 	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
 	LASSERT(gctx->gc_mechctx);
@@ -174,7 +173,6 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
 	LASSERT(req->rq_pack_bulk);
 	LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
 	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
 	case SPTLRPC_SVC_NULL:
@@ -378,7 +376,6 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
 	LASSERT(req->rq_svc_ctx);
 	LASSERT(req->rq_pack_bulk);
 	LASSERT(req->rq_bulk_write);
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
 	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
 
@@ -456,7 +453,6 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
 	LASSERT(req->rq_svc_ctx);
 	LASSERT(req->rq_pack_bulk);
 	LASSERT(req->rq_bulk_read);
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
 	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
 
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index e2ce12f..5ff7b3b 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -681,7 +681,6 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
 	struct sg_table sg_src, sg_dst;
 	int blocksize, i, rc, nob = 0;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(desc->bd_iov_count);
 	LASSERT(GET_ENC_KIOV(desc));
 
@@ -802,7 +801,6 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 	int ct_nob = 0, pt_nob = 0;
 	int blocksize, i, rc;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(desc->bd_iov_count);
 	LASSERT(GET_ENC_KIOV(desc));
 	LASSERT(desc->bd_nob_transferred);
@@ -1108,7 +1106,6 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
 	struct krb5_ctx *kctx = gctx->internal_ctx_id;
 	int blocksize, i;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(desc->bd_iov_count);
 	LASSERT(GET_ENC_KIOV(desc));
 	LASSERT(kctx->kc_keye.kb_tfm);
@@ -1152,7 +1149,6 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
 	int rc = 0;
 	u32 major;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(ke);
 	LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
 
@@ -1434,7 +1430,6 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
 	int rc;
 	__u32 major;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(ke);
 
 	if (token->len < sizeof(*khdr)) {
diff --git a/lustre/ptlrpc/nodemap_storage.c b/lustre/ptlrpc/nodemap_storage.c
index 0a7602e..6790ee8 100644
--- a/lustre/ptlrpc/nodemap_storage.c
+++ b/lustre/ptlrpc/nodemap_storage.c
@@ -1520,8 +1520,7 @@ int nodemap_get_config_req(struct obd_device *mgs_obd,
 	page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	LASSERT(page_count <= rdpg.rp_count);
 	desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
-				    PTLRPC_BULK_PUT_SOURCE |
-				    PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SOURCE,
 				    MGS_BULK_PORTAL, frag_ops);
 	if (desc == NULL)
 		GOTO(out, rc = -ENOMEM);
diff --git a/lustre/ptlrpc/pers.c b/lustre/ptlrpc/pers.c
index e899d79..18fb720 100644
--- a/lustre/ptlrpc/pers.c
+++ b/lustre/ptlrpc/pers.c
@@ -54,21 +54,12 @@ void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
 
 	md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
 	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
 
-	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
-		md->options |= LNET_MD_KIOV;
-		if (GET_ENC_KIOV(desc))
-			md->start = &BD_GET_ENC_KIOV(desc, mdidx *
-						     LNET_MAX_IOV);
-		else
-			md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
-	} else if (ptlrpc_is_bulk_desc_kvec(desc->bd_type)) {
-		md->options |= LNET_MD_IOVEC;
-		if (GET_ENC_KVEC(desc))
-			md->start = &BD_GET_ENC_KVEC(desc, mdidx *
-						     LNET_MAX_IOV);
-		else
-			md->start = &BD_GET_KVEC(desc, mdidx * LNET_MAX_IOV);
-	}
+	md->options |= LNET_MD_KIOV;
+	if (GET_ENC_KIOV(desc))
+		md->start = &BD_GET_ENC_KIOV(desc, mdidx *
+					     LNET_MAX_IOV);
+	else
+		md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
 }
 
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index e6c1df1..6c5d64f 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -538,7 +538,6 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
 	int p_idx, g_idx;
 	int i;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(desc->bd_iov_count > 0);
 	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
 
@@ -669,8 +668,6 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 	int p_idx, g_idx;
 	int i;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
 	if (GET_ENC_KIOV(desc) == NULL)
 		return;
 
@@ -913,7 +910,6 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 	unsigned int bufsize;
 	int i, err;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
 	LASSERT(buflen >= 4);
 
diff --git a/lustre/ptlrpc/sec_plain.c b/lustre/ptlrpc/sec_plain.c
index 151bb64..82310b6 100644
--- a/lustre/ptlrpc/sec_plain.c
+++ b/lustre/ptlrpc/sec_plain.c
@@ -155,8 +155,6 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
 	char *ptr;
 	unsigned int off, i;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
 	for (i = 0; i < desc->bd_iov_count; i++) {
 		if (BD_GET_KIOV(desc, i).kiov_len == 0)
 			continue;
@@ -342,7 +340,6 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 	int rc;
 	int i, nob;
 
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(req->rq_pack_bulk);
 	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
 	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
diff --git a/lustre/quota/qsd_request.c b/lustre/quota/qsd_request.c
index cf5d273..b4243ff 100644
--- a/lustre/quota/qsd_request.c
+++ b/lustre/quota/qsd_request.c
@@ -387,7 +387,7 @@ int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
 
 	/* allocate bulk descriptor */
 	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
-				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SINK,
 				    MDS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_pin_ops);
 	if (desc == NULL)
diff --git a/lustre/target/out_handler.c b/lustre/target/out_handler.c
index 6f5dd68..2849b7b 100644
--- a/lustre/target/out_handler.c
+++ b/lustre/target/out_handler.c
@@ -1017,8 +1017,7 @@ int out_handle(struct tgt_session_info *tsi)
 
 		desc = ptlrpc_prep_bulk_exp(pill->rc_req, page_count,
 					    PTLRPC_BULK_OPS_COUNT,
-					    PTLRPC_BULK_GET_SINK |
-					    PTLRPC_BULK_BUF_KIOV,
+					    PTLRPC_BULK_GET_SINK,
 					    MDS_BULK_PORTAL,
 					    &ptlrpc_bulk_kiov_nopin_ops);
 		if (desc == NULL)
diff --git a/lustre/target/tgt_handler.c b/lustre/target/tgt_handler.c
index df2b1c6..2cb7359 100644
--- a/lustre/target/tgt_handler.c
+++ b/lustre/target/tgt_handler.c
@@ -1082,7 +1082,7 @@ int tgt_send_buffer(struct tgt_session_info *tsi, struct lu_rdbuf *rdbuf)
 	}
 
 	desc = ptlrpc_prep_bulk_exp(req, pages, 1,
-				    PTLRPC_BULK_PUT_SOURCE | PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SOURCE,
 				    MDS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_nopin_ops);
 	if (desc == NULL)
@@ -1112,8 +1112,7 @@ int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
 	ENTRY;
 
 	desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
-				    PTLRPC_BULK_PUT_SOURCE |
-				    PTLRPC_BULK_BUF_KIOV,
+				    PTLRPC_BULK_PUT_SOURCE,
 				    MDS_BULK_PORTAL,
 				    &ptlrpc_bulk_kiov_pin_ops);
 	if (desc == NULL)
@@ -2245,8 +2244,7 @@ int tgt_brw_read(struct tgt_session_info *tsi)
 		desc = NULL;
 	} else {
 		desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
-					    PTLRPC_BULK_PUT_SOURCE |
-					    PTLRPC_BULK_BUF_KIOV,
+					    PTLRPC_BULK_PUT_SOURCE,
 					    OST_BULK_PORTAL,
 					    &ptlrpc_bulk_kiov_nopin_ops);
 		if (desc == NULL)
@@ -2604,8 +2602,7 @@ int tgt_brw_write(struct tgt_session_info *tsi)
 		desc = NULL;
 	} else {
 		desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
-					    PTLRPC_BULK_GET_SINK |
-					    PTLRPC_BULK_BUF_KIOV,
+					    PTLRPC_BULK_GET_SINK,
 					    OST_BULK_PORTAL,
 					    &ptlrpc_bulk_kiov_nopin_ops);
 		if (desc == NULL)
-- 
1.8.3.1