Whamcloud - gitweb
LU-9356 osd-ldiskfs: add blk_plug when do bio 97/26697/4
authorQian Yingjin <qian@ddn.com>
Mon, 17 Apr 2017 08:05:30 +0000 (16:05 +0800)
committerOleg Drokin <oleg.drokin@intel.com>
Fri, 28 Apr 2017 20:36:34 +0000 (20:36 +0000)
During 16MB bulk RPC I/O evaluation on RHEL 7, due to the kernel
BIO_MAX_PAGES (256) limit, each 16MB I/O is divided into 16 1MB
I/Os submitted to the underlying block device one by one. We found
that the SFA disk driver received lots of 1MB I/Os.
To optimize performance, this patch introduces blk_plug into
osd-ldiskfs when doing bio: before submitting I/Os it calls
blk_start_plug, and after submitting all 16MB of I/Os it calls
blk_finish_plug, so that the 16MB bulk I/O has more chance to be
merged in the block elevator scheduler layer.

Signed-off-by: Qian Yingjin <qian@ddn.com>
Change-Id: If26db9f85baf97bc441cc4ad19d5c9f97bd3d7e5
Reviewed-on: https://review.whamcloud.com/26697
Tested-by: Jenkins
Reviewed-by: Alex Zhuravlev <alexey.zhuravlev@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Li Xi <lixi@ddn.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/autoconf/lustre-core.m4
lustre/include/lustre_compat.h
lustre/osd-ldiskfs/osd_io.c

index 2ea8caa..8ee15f1 100644 (file)
@@ -687,6 +687,26 @@ LB_CHECK_EXPORT([simple_setattr], [fs/libfs.c],
 ]) # LC_EXPORT_SIMPLE_SETATTR
 
 #
+# LC_HAVE_BLK_PLUG
+#
+# 2.6.38 add struct blk_plug
+#
+AC_DEFUN([LC_HAVE_BLK_PLUG], [
+LB_CHECK_COMPILE([if 'struct blk_plug' exists],
+blk_plug, [
+       #include <linux/blkdev.h>
+],[
+       struct blk_plug plug;
+
+       blk_start_plug(&plug);
+       blk_finish_plug(&plug);
+],[
+       AC_DEFINE(HAVE_BLK_PLUG, 1,
+               [blk_plug struct exists])
+])
+]) # LC_HAVE_BLK_PLUG
+
+#
 # LC_IOP_TRUNCATE
 #
 # truncate callback removed since 2.6.39
@@ -2369,6 +2389,7 @@ AC_DEFUN([LC_PROG_LINUX], [
        LC_INODE_I_RCU
        LC_D_COMPARE_7ARGS
        LC_D_DELETE_CONST
+       LC_HAVE_BLK_PLUG
 
        # 2.6.39
        LC_REQUEST_QUEUE_UNPLUG_FN
index 8fd5821..fa0a256 100644 (file)
@@ -179,6 +179,14 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
 #define queue_max_hw_segments(rq)         queue_max_segments(rq)
 #endif
 
+#ifdef HAVE_BLK_PLUG
+#define DECLARE_PLUG(plug)     struct blk_plug plug
+#else /* !HAVE_BLK_PLUG */
+#define DECLARE_PLUG(name)
+#define blk_start_plug(plug)   do {} while (0)
+#define blk_finish_plug(plug)  do {} while (0)
+#endif
+
 #ifdef HAVE_KMAP_ATOMIC_HAS_1ARG
 #define ll_kmap_atomic(a, b)   kmap_atomic(a)
 #define ll_kunmap_atomic(a, b) kunmap_atomic(a)
index cffb6c6..7a802d3 100644 (file)
@@ -277,6 +277,7 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
        int            page_idx;
        int            i;
        int            rc = 0;
+       DECLARE_PLUG(plug);
        ENTRY;
 
         LASSERT(iobuf->dr_npages == npages);
@@ -284,6 +285,7 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
         osd_brw_stats_update(osd, iobuf);
         iobuf->dr_start_time = cfs_time_current();
 
+       blk_start_plug(&plug);
         for (page_idx = 0, block_idx = 0;
              page_idx < npages;
              page_idx++, block_idx += blocks_per_page) {
@@ -370,6 +372,8 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
        }
 
 out:
+       blk_finish_plug(&plug);
+
        /* in order to achieve better IO throughput, we don't wait for writes
         * completion here. instead we proceed with transaction commit in
         * parallel and wait for IO completion once transaction is stopped