]) # LC_EXPORT_SIMPLE_SETATTR
#
+# LC_HAVE_BLK_PLUG
+#
+# 2.6.38 added struct blk_plug for block-layer request batching
+#
+AC_DEFUN([LC_HAVE_BLK_PLUG], [
+LB_CHECK_COMPILE([if 'struct blk_plug' exists],
+blk_plug, [
+ #include <linux/blkdev.h>
+],[
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
+ blk_finish_plug(&plug);
+],[
+ AC_DEFINE(HAVE_BLK_PLUG, 1,
+ [blk_plug struct exists])
+])
+]) # LC_HAVE_BLK_PLUG
+
+#
# LC_IOP_TRUNCATE
#
# truncate callback removed since 2.6.39
LC_INODE_I_RCU
LC_D_COMPARE_7ARGS
LC_D_DELETE_CONST
+ LC_HAVE_BLK_PLUG
# 2.6.39
LC_REQUEST_QUEUE_UNPLUG_FN
#define queue_max_hw_segments(rq) queue_max_segments(rq)
#endif
+#ifdef HAVE_BLK_PLUG
+#define DECLARE_PLUG(name) struct blk_plug name
+#else /* !HAVE_BLK_PLUG */
+#define DECLARE_PLUG(name) /* no-op: blk_plug absent before 2.6.38 */
+#define blk_start_plug(plug) do {} while (0)
+#define blk_finish_plug(plug) do {} while (0)
+#endif
+
#ifdef HAVE_KMAP_ATOMIC_HAS_1ARG
#define ll_kmap_atomic(a, b) kmap_atomic(a)
#define ll_kunmap_atomic(a, b) kunmap_atomic(a)
int page_idx;
int i;
int rc = 0;
+ DECLARE_PLUG(plug);
ENTRY;
LASSERT(iobuf->dr_npages == npages);
osd_brw_stats_update(osd, iobuf);
iobuf->dr_start_time = cfs_time_current();
+ blk_start_plug(&plug);
for (page_idx = 0, block_idx = 0;
page_idx < npages;
page_idx++, block_idx += blocks_per_page) {
}
out:
+ blk_finish_plug(&plug);
+
/* in order to achieve better IO throughput, we don't wait for writes
* completion here. instead we proceed with transaction commit in
* parallel and wait for IO completion once transaction is stopped