Whamcloud - gitweb
LU-13004 osp: break read request into pages. 25/36825/8
author: Mr NeilBrown <neilb@suse.de>
Tue, 28 Jan 2020 13:46:51 +0000 (08:46 -0500)
committer: Oleg Drokin <green@whamcloud.com>
Fri, 14 Feb 2020 05:50:28 +0000 (05:50 +0000)
Rather than breaking up a read request into arbitrarily
sized (4K) pieces of memory in virtual address space,
break it up into pages (which might be 64K) and
use a kiov rather than kvec to manage them.

This is a step towards removing kvec support and
standardizing on kiov.

Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: If688764c53066a9c4db212682085fa899d4dde1b
Reviewed-on: https://review.whamcloud.com/36825
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Mike Pershin <mpershin@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Alex Zhuravlev <bzzz@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/osp/osp_md_object.c

index 849fc83..37ce566 100644 (file)
@@ -1207,9 +1207,7 @@ static ssize_t osp_md_read(const struct lu_env *env, struct dt_object *dt,
        struct out_read_reply *orr;
        struct ptlrpc_bulk_desc *desc;
        struct object_update_reply *reply;
        struct out_read_reply *orr;
        struct ptlrpc_bulk_desc *desc;
        struct object_update_reply *reply;
-       __u32 left_size;
-       int nbufs;
-       int i;
+       int pages;
        int rc;
        ENTRY;
 
        int rc;
        ENTRY;
 
@@ -1237,26 +1235,18 @@ static ssize_t osp_md_read(const struct lu_env *env, struct dt_object *dt,
        if (rc != 0)
                GOTO(out_update, rc);
 
        if (rc != 0)
                GOTO(out_update, rc);
 
-       nbufs = (rbuf->lb_len + OUT_BULK_BUFFER_SIZE - 1) /
-                                       OUT_BULK_BUFFER_SIZE;
+       /* First *and* last might be partial pages, hence +1 */
+       pages = DIV_ROUND_UP(rbuf->lb_len, PAGE_SIZE) + 1;
+
        /* allocate bulk descriptor */
        /* allocate bulk descriptor */
-       desc = ptlrpc_prep_bulk_imp(req, nbufs, 1,
-                                   PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KVEC,
-                                   MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);
+       desc = ptlrpc_prep_bulk_imp(req, pages, 1,
+                                   PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+                                   MDS_BULK_PORTAL,
+                                   &ptlrpc_bulk_kiov_nopin_ops);
        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
 
        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
 
-       /* split the buffer into small chunk size */
-       left_size = rbuf->lb_len;
-       for (i = 0; i < nbufs; i++) {
-               int read_size;
-
-               read_size = left_size > OUT_BULK_BUFFER_SIZE ?
-                               OUT_BULK_BUFFER_SIZE : left_size;
-               desc->bd_frag_ops->add_iov_frag(desc, ptr, read_size);
-
-               ptr += read_size;
-       }
+       desc->bd_frag_ops->add_iov_frag(desc, ptr, rbuf->lb_len);
 
        osp_set_req_replay(osp, req);
        req->rq_bulk_read = 1;
 
        osp_set_req_replay(osp, req);
        req->rq_bulk_read = 1;