Whamcloud - gitweb
LU-15 slow IO with read-intense application
author    yangsheng <ys@whamcloud.com>
          Wed, 17 Aug 2011 18:43:49 +0000 (02:43 +0800)
committer Oleg Drokin <green@whamcloud.com>
          Tue, 25 Oct 2011 22:15:55 +0000 (18:15 -0400)
Align the readahead extent to a 1M boundary after it is trimmed by ra_max_pages.
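
As a rough illustration of the re-alignment (a sketch assuming 4 KiB pages, so
PTLRPC_MAX_BRW_PAGES is 256 pages = 1 MiB; the numbers are hypothetical):

    unsigned long ria_start = 100;  /* first page of the readahead window        */
    unsigned long ret       = 600;  /* pages granted after the ra_max_pages trim */
    unsigned long tail      = (ria_start + ret) % 256;   /* 700 % 256 = 188      */

    ret -= tail;                    /* 600 - 188 = 412                            */
    /* The window now ends at page 512, a 1M boundary, so the last
     * read RPC is a full 1M request rather than a short one. */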

Change-Id: I4102d2fe956fd01457949f0eb7c63654b5c2d095
Signed-off-by: Wang Di <di.wang@whamcloud.com>
Signed-off-by: Yang Sheng <ys@whamcloud.com>
Reviewed-on: http://review.whamcloud.com/1255
Tested-by: Hudson
Reviewed-by: Jinshan Xiong <jay@whamcloud.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/llite/rw.c
lustre/llite/vvp_io.c

diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c
index d5e012e..f8fab63 100644
@@ -338,7 +338,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
  * ll_ra_count_get at the exactly same time. All of them will get a zero ra
  * window, although the global window is 100M. -jay
  */
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria,
+                                     unsigned long len)
 {
         struct ll_ra_info *ra = &sbi->ll_ra_info;
         unsigned long ret;
@@ -349,14 +350,23 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
          * otherwise it will form small read RPC(< 1M), which hurt server
          * performance a lot.
          */
+        if (ra->ra_max_pages < cfs_atomic_read(&ra->ra_cur_pages))
+                GOTO(out, ret = 0);
+
         ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
         if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
                 GOTO(out, ret = 0);
 
+        if (ria->ria_pages == 0)
+                /* it needs 1M alignment again after being trimmed by ra_max_pages */
+                if (ret >= ((ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES))
+                        ret -= (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+
         if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
                 cfs_atomic_sub(ret, &ra->ra_cur_pages);
                 ret = 0;
         }
+
 out:
         RETURN(ret);
 }
@@ -787,8 +797,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
         if (len == 0)
                 RETURN(0);
 
-        reserved = ll_ra_count_get(ll_i2sbi(inode), len);
-
+        reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
         if (reserved < len)
                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
 
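Taken together, the rw.c hunks make ll_ra_count_get() refuse to reserve pages
once the global budget is exhausted and, for non-stride reads
(ria_pages == 0), shrink the grant so the window ends on an RPC boundary.
A simplified standalone model of that logic (the helper name is hypothetical;
PTLRPC_MAX_BRW_PAGES is assumed to be 256, i.e. 1 MiB of 4 KiB pages):

    #define PTLRPC_MAX_BRW_PAGES 256   /* assumed: 1 MiB of 4 KiB pages */

    /*
     * Simplified model of the patched ll_ra_count_get(): reserve up to
     * 'len' pages from a global readahead budget of 'max_pages' (of which
     * 'cur_pages' are already in use), then align the end of the window,
     * which starts at page 'ria_start', down to a full-RPC boundary.
     */
    static unsigned long ra_reserve_pages(unsigned long max_pages,
                                          unsigned long cur_pages,
                                          unsigned long ria_start,
                                          unsigned long ria_pages,
                                          unsigned long len)
    {
            unsigned long ret;

            if (max_pages < cur_pages)          /* budget already exhausted */
                    return 0;

            ret = max_pages - cur_pages;
            if (ret > len)
                    ret = len;
            /* refuse reservations that could only form a short (< 1M) RPC */
            if (ret < PTLRPC_MAX_BRW_PAGES && ret < len)
                    return 0;

            if (ria_pages == 0) {               /* not a stride read */
                    unsigned long tail = (ria_start + ret) % PTLRPC_MAX_BRW_PAGES;

                    /* re-align the end of the window to a 1M boundary */
                    if (ret >= tail)
                            ret -= tail;
            }
            return ret;
    }
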
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index cbf02cd..9f92f9e 100644
@@ -792,7 +792,8 @@ static int vvp_io_read_page(const struct lu_env *env,
 
         ENTRY;
 
-        if (sbi->ll_ra_info.ra_max_pages_per_file)
+        if (sbi->ll_ra_info.ra_max_pages_per_file &&
+            sbi->ll_ra_info.ra_max_pages)
                 ras_update(sbi, inode, ras, page->cp_index,
                            cp->cpg_defer_uptodate);
 
@@ -815,7 +816,8 @@ static int vvp_io_read_page(const struct lu_env *env,
          * this will unlock it automatically as part of cl_page_list_disown().
          */
         cl_2queue_add(queue, page);
-        if (sbi->ll_ra_info.ra_max_pages_per_file)
+        if (sbi->ll_ra_info.ra_max_pages_per_file &&
+            sbi->ll_ra_info.ra_max_pages)
                 ll_readahead(env, io, ras,
                              vmpage->mapping, &queue->c2_qin, fd->fd_flags);
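
The vvp_io.c hunks extend the existing guard: the readahead state is updated
and readahead issued only when both the per-file limit and the global limit
are non-zero. A minimal sketch of that condition (the struct and function
names here are hypothetical stand-ins for ll_ra_info):

    struct ra_limits {
            unsigned long ra_max_pages;            /* global readahead budget   */
            unsigned long ra_max_pages_per_file;   /* per-file readahead budget */
    };

    /* Readahead is attempted only if neither limit has been set to zero. */
    static int readahead_enabled(const struct ra_limits *ra)
    {
            return ra->ra_max_pages_per_file != 0 && ra->ra_max_pages != 0;
    }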