Since a643e38 (LU-2089), OSTs do not pin pages involved in
BULKs: this is done to prevent get/put on pages which
were allocated as part of an order-N (>1) allocation with a 0
refcount; get/put on such a page leads to a warning from
the kernel. In the original patch one code path was not
fixed, so this patch completes the change.
Also, to prevent confusion, the patch removes a couple of macros:
ptlrpc_free_bulk() and ptlrpc_prep_bulk_page(). Now the
caller must specify explicitly whether ptlrpc should reference
the pages or not.
Signed-off-by: Alex Zhuravlev <alexey.zhuravlev@intel.com>
Change-Id: I8cf5f334e8f7edab0ad37678e1e8af18904a0be6
Reviewed-on: http://review.whamcloud.com/4256
Tested-by: Hudson
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Mike Pershin <tappro@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
int npages, int type, int portal);
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
int npages, int type, int portal);
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
-static inline void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk)
+static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
{
__ptlrpc_free_bulk(bulk, 1);
}
{
__ptlrpc_free_bulk(bulk, 1);
}
}
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
cfs_page_t *page, int pageoffset, int len, int);
}
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
cfs_page_t *page, int pageoffset, int len, int);
-static inline void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset,
- int len)
+static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
+ cfs_page_t *page, int pageoffset,
+ int len)
{
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
}
{
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
}
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < op_data->op_npages; i++)
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < op_data->op_npages; i++)
- ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
mdc_readdir_pack(req, op_data->op_offset,
CFS_PAGE_SIZE * op_data->op_npages,
mdc_readdir_pack(req, op_data->op_offset,
CFS_PAGE_SIZE * op_data->op_npages,
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
- ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
+ ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
}
LASSERT(desc->bd_nob == nob);
rc = target_bulk_io(exp, desc, lwi);
}
LASSERT(desc->bd_nob == nob);
rc = target_bulk_io(exp, desc, lwi);
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_pin(desc);
cleanup_page:
cfs_free_page(page);
desc_cleanup:
cleanup_page:
cfs_free_page(page);
desc_cleanup:
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_pin(desc);
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
GOTO(out, rc = -ENOMEM);
for (i = 0; i < page_count && bytes > 0; i++) {
GOTO(out, rc = -ENOMEM);
for (i = 0; i < page_count && bytes > 0; i++) {
- ptlrpc_prep_bulk_page(desc, pages[i], 0,
- min_t(int, bytes, CFS_PAGE_SIZE));
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0,
+ min_t(int, bytes, CFS_PAGE_SIZE));
bytes -= CFS_PAGE_SIZE;
}
rc = target_bulk_io(req->rq_export, desc, &lwi);
bytes -= CFS_PAGE_SIZE;
}
rc = target_bulk_io(req->rq_export, desc, &lwi);
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_pin(desc);
out:
for (i = 0; i < nrpages; i++) {
out:
for (i = 0; i < nrpages; i++) {
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
- ptlrpc_prep_bulk_page(desc, pg->pg, poff, pg->count);
+ ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
requested_nob += pg->count;
if (i > 0 && can_merge_pages(pg_prev, pg)) {
requested_nob += pg->count;
if (i > 0 && can_merge_pages(pg_prev, pg)) {
lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
l_wait_event(waitq, 0, &lwi1);
rc = target_bulk_io(exp, desc, &lwi);
lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
l_wait_event(waitq, 0, &lwi1);
rc = target_bulk_io(exp, desc, &lwi);
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_nopin(desc);
class_import_put(request->rq_import);
request->rq_import = NULL;
}
class_import_put(request->rq_import);
request->rq_import = NULL;
}
- if (request->rq_bulk != NULL)
- ptlrpc_free_bulk(request->rq_bulk);
+ if (request->rq_bulk != NULL)
+ ptlrpc_free_bulk_pin(request->rq_bulk);
if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
sptlrpc_cli_free_reqbuf(request);
if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
sptlrpc_cli_free_reqbuf(request);
/* req now owns desc and will free it when it gets freed */
for (i = 0; i < npages; i++)
/* req now owns desc and will free it when it gets freed */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
/* pack index information in request */
req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
/* pack index information in request */
req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);