From 68d19d5a0a772a002054d516f99464db4a478556 Mon Sep 17 00:00:00 2001 From: Qian Yingjin Date: Mon, 17 Apr 2017 16:05:30 +0800 Subject: [PATCH] LU-9356 osd-ldiskfs: add blk_plug when do bio During 16MB bulk RPC I/O evaluation on rhel7, due to the kernel BIO_MAX_PAGES (256) limit, the 16MB I/O is divided into 16 1MB I/Os submitted to the underlying block device one by one. And we found that the SFA disk driver got lots of 1MB I/Os. To optimize the performance, this patch introduces blk_plug into osd-ldiskfs when doing bio: before submitting I/Os it calls blk_start_plug, and after submitting all 16MB of I/Os it calls blk_finish_plug, so that the 16MB bulk I/O will have more chance to merge in the block elevator scheduler layer. Signed-off-by: Qian Yingjin Change-Id: If26db9f85baf97bc441cc4ad19d5c9f97bd3d7e5 Reviewed-on: https://review.whamcloud.com/26697 Tested-by: Jenkins Reviewed-by: Alex Zhuravlev Tested-by: Maloo Reviewed-by: Li Xi Reviewed-by: Oleg Drokin --- lustre/autoconf/lustre-core.m4 | 21 +++++++++++++++++++++ lustre/include/lustre_compat.h | 8 ++++++++ lustre/osd-ldiskfs/osd_io.c | 4 ++++ 3 files changed, 33 insertions(+) diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4 index 2ea8caa..8ee15f1 100644 --- a/lustre/autoconf/lustre-core.m4 +++ b/lustre/autoconf/lustre-core.m4 @@ -687,6 +687,26 @@ LB_CHECK_EXPORT([simple_setattr], [fs/libfs.c], ]) # LC_EXPORT_SIMPLE_SETATTR # +# LC_HAVE_BLK_PLUG +# +# 2.6.38 add struct blk_plug +# +AC_DEFUN([LC_HAVE_BLK_PLUG], [ +LB_CHECK_COMPILE([if 'struct blk_plug' exists], +blk_plug, [ + #include <linux/blkdev.h> +],[ + struct blk_plug plug; + + blk_start_plug(&plug); + blk_finish_plug(&plug); +],[ + AC_DEFINE(HAVE_BLK_PLUG, 1, + [blk_plug struct exists]) +]) +]) # LC_HAVE_BLK_PLUG + +# # LC_IOP_TRUNCATE # # truncate callback removed since 2.6.39 @@ -2369,6 +2389,7 @@ AC_DEFUN([LC_PROG_LINUX], [ LC_INODE_I_RCU LC_D_COMPARE_7ARGS LC_D_DELETE_CONST + LC_HAVE_BLK_PLUG # 2.6.39 LC_REQUEST_QUEUE_UNPLUG_FN diff --git a/lustre/include/lustre_compat.h 
b/lustre/include/lustre_compat.h index 8fd5821..fa0a256 100644 --- a/lustre/include/lustre_compat.h +++ b/lustre/include/lustre_compat.h @@ -179,6 +179,14 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt, #define queue_max_hw_segments(rq) queue_max_segments(rq) #endif +#ifdef HAVE_BLK_PLUG +#define DECLARE_PLUG(plug) struct blk_plug plug +#else /* !HAVE_BLK_PLUG */ +#define DECLARE_PLUG(name) +#define blk_start_plug(plug) do {} while (0) +#define blk_finish_plug(plug) do {} while (0) +#endif + #ifdef HAVE_KMAP_ATOMIC_HAS_1ARG #define ll_kmap_atomic(a, b) kmap_atomic(a) #define ll_kunmap_atomic(a, b) kunmap_atomic(a) diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c index cffb6c6..7a802d3 100644 --- a/lustre/osd-ldiskfs/osd_io.c +++ b/lustre/osd-ldiskfs/osd_io.c @@ -277,6 +277,7 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode, int page_idx; int i; int rc = 0; + DECLARE_PLUG(plug); ENTRY; LASSERT(iobuf->dr_npages == npages); @@ -284,6 +285,7 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode, osd_brw_stats_update(osd, iobuf); iobuf->dr_start_time = cfs_time_current(); + blk_start_plug(&plug); for (page_idx = 0, block_idx = 0; page_idx < npages; page_idx++, block_idx += blocks_per_page) { @@ -370,6 +372,8 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode, } out: + blk_finish_plug(&plug); + /* in order to achieve better IO throughput, we don't wait for writes * completion here. instead we proceed with transaction commit in * parallel and wait for IO completion once transaction is stopped -- 1.8.3.1