Whamcloud - gitweb
LU-12043 llite: extend readahead locks for striped file 38/35438/7
author Wang Shilong <wshilong@ddn.com>
Thu, 8 Aug 2019 17:07:21 +0000 (13:07 -0400)
committer Oleg Drokin <green@whamcloud.com>
Mon, 16 Sep 2019 23:03:08 +0000 (23:03 +0000)
Currently cl_io_read_ahead() cannot return locks
that cross a stripe boundary in a single call, so
readahead stops at that point.

This is really bad, as we will stop readahead every
time we hit a stripe boundary; for example, with the
default stripe size of 1M, this could hurt performance
badly, especially with the async readahead introduced.

So try to use existing locks aggressively if there is no
lock contention; otherwise, the lock should be no
smaller than the requested extent.

Change-Id: I8b2dcd0e80138ea530272cab6a665981aa00cca8
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Reviewed-on: https://review.whamcloud.com/35438
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Li Xi <lixi@ddn.com>
Reviewed-by: Patrick Farrell <pfarrell@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/cl_object.h
lustre/llite/rw.c
lustre/osc/osc_io.c

index 0524f34..4b13504 100644 (file)
@@ -1484,6 +1484,8 @@ struct cl_read_ahead {
        void    (*cra_release)(const struct lu_env *env, void *cbdata);
        /* Callback data for cra_release routine */
        void    *cra_cbdata;
+       /* whether lock is in contention */
+       bool    cra_contention;
 };
 
 static inline void cl_read_ahead_release(const struct lu_env *env,
index cda05f6..0f86103 100644 (file)
@@ -364,6 +364,17 @@ ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
                                if (rc < 0)
                                        break;
 
+                               /* Do not shrink the ria_end at any case until
+                                * the minimum end of current read is covered.
+                                * And only shrink the ria_end if the matched
+                                * LDLM lock doesn't cover more. */
+                               if (page_idx > ra.cra_end ||
+                                   (ra.cra_contention &&
+                                    page_idx > ria->ria_end_min)) {
+                                       ria->ria_end = ra.cra_end;
+                                       break;
+                               }
+
                                CDEBUG(D_READA, "idx: %lu, ra: %lu, rpc: %lu\n",
                                       page_idx, ra.cra_end, ra.cra_rpc_size);
                                LASSERTF(ra.cra_end >= page_idx,
@@ -380,8 +391,6 @@ ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
                                        ria->ria_end = end - 1;
                                if (ria->ria_end < ria->ria_end_min)
                                        ria->ria_end = ria->ria_end_min;
-                               if (ria->ria_end > ra.cra_end)
-                                       ria->ria_end = ra.cra_end;
                        }
                        if (page_idx > ria->ria_end)
                                break;
index 4a1ac52..0d2bf4f 100644 (file)
@@ -91,6 +91,8 @@ static int osc_io_read_ahead(const struct lu_env *env,
                                       dlmlock->l_policy_data.l_extent.end);
                ra->cra_release = osc_read_ahead_release;
                ra->cra_cbdata = dlmlock;
+               if (ra->cra_end != CL_PAGE_EOF)
+                       ra->cra_contention = true;
                result = 0;
        }