LU-13134 obdclass: use slab allocation for cl_dio_aio
diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c
index 3d05da9..45d9307 100644
--- a/lustre/obdclass/cl_io.c
+++ b/lustre/obdclass/cl_io.c
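
For context: the change named in the subject replaces ad-hoc allocation of struct cl_dio_aio with a dedicated kmem cache, cl_dio_aio_kmem, used by cl_aio_alloc() and cl_sync_io_note() below. A minimal sketch of what the cache setup presumably looks like; the lu_kmem_descr registration site and the helper names are assumptions, not part of this diff:

/* Sketch only: dedicated slab cache for struct cl_dio_aio.  The
 * cl_dio_aio_kmem identifier matches the diff below; the registration
 * table and the init/fini helper names are assumed. */
struct kmem_cache *cl_dio_aio_kmem;

static struct lu_kmem_descr cl_io_caches[] = {
	{
		.ckd_cache = &cl_dio_aio_kmem,
		.ckd_name  = "cl_dio_aio_kmem",
		.ckd_size  = sizeof(struct cl_dio_aio)
	},
	{
		.ckd_cache = NULL
	}
};

int cl_io_init_caches(void)		/* called once at module init */
{
	return lu_kmem_init(cl_io_caches);
}

void cl_io_fini_caches(void)		/* called at module exit */
{
	lu_kmem_fini(cl_io_caches);
}
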
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 #define DEBUG_SUBSYSTEM S_CLASS
 
 #include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
 #include <obd_class.h>
 #include <obd_support.h>
 #include <lustre_fid.h>
-#include <libcfs/list.h>
 #include <cl_object.h>
 #include "cl_internal.h"
 
  *
  */
 
-#define cl_io_for_each(slice, io) \
-       list_for_each_entry((slice), &io->ci_layers, cis_linkage)
-#define cl_io_for_each_reverse(slice, io)                 \
-       list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
-
 static inline int cl_io_type_is_valid(enum cl_io_type type)
 {
         return CIT_READ <= type && type < CIT_OP_NR;
@@ -71,15 +63,6 @@ static inline int cl_io_is_loopable(const struct cl_io *io)
 }
 
 /**
- * Returns true iff there is an IO ongoing in the given environment.
- */
-int cl_io_is_going(const struct lu_env *env)
-{
-        return cl_env_info(env)->clt_current_io != NULL;
-}
-EXPORT_SYMBOL(cl_io_is_going);
-
-/**
  * cl_io invariant that holds at all times when exported cl_io_*() functions
  * are entered and left.
  */
@@ -104,7 +87,6 @@ static int cl_io_invariant(const struct cl_io *io)
 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
 {
        struct cl_io_slice    *slice;
-       struct cl_thread_info *info;
 
         LINVRNT(cl_io_type_is_valid(io->ci_type));
         LINVRNT(cl_io_invariant(io));
@@ -124,16 +106,14 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
                 slice->cis_io = NULL;
         }
         io->ci_state = CIS_FINI;
-        info = cl_env_info(env);
-        if (info->clt_current_io == io)
-                info->clt_current_io = NULL;
 
        /* sanity check for layout change */
        switch(io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
-               break;
+       case CIT_DATA_VERSION:
        case CIT_FAULT:
+               break;
        case CIT_FSYNC:
                LASSERT(!io->ci_need_restart);
                break;
@@ -142,6 +122,9 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
                /* Check ignore layout change conf */
                LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
                                !io->ci_need_restart));
+       case CIT_GLIMPSE:
+               break;
+       case CIT_LADVISE:
                break;
        default:
                LBUG();
@@ -187,11 +170,8 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
                    enum cl_io_type iot, struct cl_object *obj)
 {
-        struct cl_thread_info *info = cl_env_info(env);
-
         LASSERT(obj != cl_object_top(obj));
-        if (info->clt_current_io == NULL)
-                info->clt_current_io = io;
+
         return cl_io_init0(env, io, iot, obj);
 }
 EXPORT_SYMBOL(cl_io_sub_init);
@@ -209,13 +189,12 @@ EXPORT_SYMBOL(cl_io_sub_init);
 int cl_io_init(const struct lu_env *env, struct cl_io *io,
                enum cl_io_type iot, struct cl_object *obj)
 {
-        struct cl_thread_info *info = cl_env_info(env);
+       LASSERT(obj == cl_object_top(obj));
 
-        LASSERT(obj == cl_object_top(obj));
-        LASSERT(info->clt_current_io == NULL);
+       /* clear I/O restart from previous instance */
+       io->ci_need_restart = 0;
 
-        info->clt_current_io = io;
-        return cl_io_init0(env, io, iot, obj);
+       return cl_io_init0(env, io, iot, obj);
 }
 EXPORT_SYMBOL(cl_io_init);
 
@@ -225,75 +204,36 @@ EXPORT_SYMBOL(cl_io_init);
  * \pre iot == CIT_READ || iot == CIT_WRITE
  */
 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
-                  enum cl_io_type iot, loff_t pos, size_t count)
+                 enum cl_io_type iot, loff_t pos, size_t count)
 {
-        LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
-        LINVRNT(io->ci_obj != NULL);
-        ENTRY;
+       LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
+       LINVRNT(io->ci_obj != NULL);
+       ENTRY;
 
-        LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
-                         "io range: %u ["LPU64", "LPU64") %u %u\n",
-                         iot, (__u64)pos, (__u64)pos + count,
-                         io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
-        io->u.ci_rw.crw_pos    = pos;
-        io->u.ci_rw.crw_count  = count;
-        RETURN(cl_io_init(env, io, iot, io->ci_obj));
+       LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
+                        "io range: %u [%llu, %llu) %u %u\n",
+                        iot, (__u64)pos, (__u64)pos + count,
+                        io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
+       io->u.ci_rw.crw_pos    = pos;
+       io->u.ci_rw.crw_count  = count;
+       RETURN(cl_io_init(env, io, iot, io->ci_obj));
 }
 EXPORT_SYMBOL(cl_io_rw_init);
 
-static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
-                              const struct cl_lock_descr *d1)
+static int cl_lock_descr_cmp(void *priv,
+                            struct list_head *a, struct list_head *b)
 {
+       const struct cl_io_lock_link *l0 = list_entry(a, struct cl_io_lock_link,
+                                                     cill_linkage);
+       const struct cl_io_lock_link *l1 = list_entry(b, struct cl_io_lock_link,
+                                                     cill_linkage);
+       const struct cl_lock_descr *d0 = &l0->cill_descr;
+       const struct cl_lock_descr *d1 = &l1->cill_descr;
+
        return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
                          lu_object_fid(&d1->cld_obj->co_lu));
 }
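
cl_lock_descr_cmp() above is written for the kernel's list_sort(), which merge-sorts a list_head chain and hands the comparator pointers to the embedded linkage. A self-contained sketch of the same pattern, with a hypothetical element type:

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/types.h>

struct item {
	u64			key;
	struct list_head	linkage;
};

/* comparator: negative/zero/positive, like cl_lock_descr_cmp() above */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = list_entry(a, struct item, linkage);
	struct item *ib = list_entry(b, struct item, linkage);

	if (ia->key == ib->key)
		return 0;
	return ia->key < ib->key ? -1 : +1;
}

/* usage, as in cl_io_lock() below: list_sort(NULL, &head, item_cmp); */
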
 
-/*
- * Sort locks in lexicographical order of their (fid, start-offset) pairs.
- */
-static void cl_io_locks_sort(struct cl_io *io)
-{
-        int done = 0;
-
-        ENTRY;
-        /* hidden treasure: bubble sort for now. */
-        do {
-                struct cl_io_lock_link *curr;
-                struct cl_io_lock_link *prev;
-                struct cl_io_lock_link *temp;
-
-                done = 1;
-                prev = NULL;
-
-               list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
-                                        cill_linkage) {
-                       if (prev != NULL) {
-                               switch (cl_lock_descr_sort(&prev->cill_descr,
-                                                          &curr->cill_descr)) {
-                               case 0:
-                                       /*
-                                        * IMPOSSIBLE:  Identical locks are
-                                        *              already removed at
-                                        *              this point.
-                                        */
-                               default:
-                                       LBUG();
-                               case +1:
-                                       list_move_tail(&curr->cill_linkage,
-                                                      &prev->cill_linkage);
-                                       done = 0;
-                                       continue; /* don't change prev: it's
-                                                  * still "previous" */
-                               case -1: /* already in order */
-                                       break;
-                               }
-                       }
-                       prev = curr;
-               }
-       } while (!done);
-       EXIT;
-}
-
 static void cl_lock_descr_merge(struct cl_lock_descr *d0,
                                const struct cl_lock_descr *d1)
 {
@@ -368,15 +308,19 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io)
         LINVRNT(cl_io_invariant(io));
 
         ENTRY;
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
-                        continue;
-                result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
-                if (result != 0)
-                        break;
-        }
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
+                       continue;
+               result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
+               if (result != 0)
+                       break;
+       }
         if (result == 0) {
-                cl_io_locks_sort(io);
+               /*
+                * Sort locks in lexicographical order of their (fid,
+                * start-offset) pairs to avoid deadlocks.
+                */
+               list_sort(NULL, &io->ci_lockset.cls_todo, cl_lock_descr_cmp);
                 result = cl_lockset_lock(env, io, &io->ci_lockset);
         }
         if (result != 0)
@@ -417,12 +361,11 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
                        link->cill_fini(env, link);
        }
 
-       cl_io_for_each_reverse(scan, io) {
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
                        scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
        }
        io->ci_state = CIS_UNLOCKED;
-       LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
        EXIT;
 }
 EXPORT_SYMBOL(cl_io_unlock);
@@ -445,14 +388,14 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
 
         ENTRY;
         result = 0;
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
-                        continue;
-                result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
-                                                                      scan);
-                if (result != 0)
-                        break;
-        }
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
+                       continue;
+               result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
+                                                                     scan);
+               if (result != 0)
+                       break;
+       }
         if (result == 0)
                 io->ci_state = CIS_IT_STARTED;
         RETURN(result);
@@ -466,19 +409,20 @@ EXPORT_SYMBOL(cl_io_iter_init);
  */
 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
 {
-        const struct cl_io_slice *scan;
+       const struct cl_io_slice *scan;
 
-        LINVRNT(cl_io_is_loopable(io));
-        LINVRNT(io->ci_state == CIS_UNLOCKED);
-        LINVRNT(cl_io_invariant(io));
+       LINVRNT(cl_io_is_loopable(io));
+       LINVRNT(io->ci_state <= CIS_IT_STARTED ||
+               io->ci_state > CIS_IO_FINISHED);
+       LINVRNT(cl_io_invariant(io));
 
-        ENTRY;
-        cl_io_for_each_reverse(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
-                        scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
-        }
-        io->ci_state = CIS_IT_ENDED;
-        EXIT;
+       ENTRY;
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
+                       scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
+       }
+       io->ci_state = CIS_IT_ENDED;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_io_iter_fini);
 
@@ -487,27 +431,26 @@ EXPORT_SYMBOL(cl_io_iter_fini);
  */
 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
 {
-        const struct cl_io_slice *scan;
+       const struct cl_io_slice *scan;
 
-        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
-                nob == 0);
-        LINVRNT(cl_io_is_loopable(io));
-        LINVRNT(cl_io_invariant(io));
+       ENTRY;
 
-        ENTRY;
+       LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
+               nob == 0);
+       LINVRNT(cl_io_is_loopable(io));
+       LINVRNT(cl_io_invariant(io));
 
-        io->u.ci_rw.crw_pos   += nob;
-        io->u.ci_rw.crw_count -= nob;
+       io->u.ci_rw.crw_pos   += nob;
+       io->u.ci_rw.crw_count -= nob;
 
-        /* layers have to be notified. */
-        cl_io_for_each_reverse(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
-                        scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
-                                                                   nob);
-        }
-        EXIT;
+       /* layers have to be notified. */
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
+                       scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
+                                                                  nob);
+       }
+       EXIT;
 }
-EXPORT_SYMBOL(cl_io_rw_advance);
 
 /**
  * Adds a lock to a lockset.
@@ -572,13 +515,13 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io)
         ENTRY;
 
         io->ci_state = CIS_IO_GOING;
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
-                        continue;
-                result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
-                if (result != 0)
-                        break;
-        }
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
+                       continue;
+               result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
+               if (result != 0)
+                       break;
+       }
         if (result >= 0)
                 result = 0;
         RETURN(result);
@@ -598,78 +541,45 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
         LINVRNT(cl_io_invariant(io));
         ENTRY;
 
-        cl_io_for_each_reverse(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
-                        scan->cis_iop->op[io->ci_type].cio_end(env, scan);
-                /* TODO: error handling. */
-        }
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
+                       scan->cis_iop->op[io->ci_type].cio_end(env, scan);
+               /* TODO: error handling. */
+       }
         io->ci_state = CIS_IO_FINISHED;
         EXIT;
 }
 EXPORT_SYMBOL(cl_io_end);
 
-static const struct cl_page_slice *
-cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
-{
-        const struct cl_page_slice *slice;
-
-        slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
-        LINVRNT(slice != NULL);
-        return slice;
-}
-
 /**
- * Called by read io, when page has to be read from the server.
+ * Called by read io to decide the readahead extent.
  *
- * \see cl_io_operations::cio_read_page()
+ * \see cl_io_operations::cio_read_ahead()
  */
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
-                    struct cl_page *page)
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+                    pgoff_t start, struct cl_read_ahead *ra)
 {
-        const struct cl_io_slice *scan;
-        struct cl_2queue         *queue;
-        int                       result = 0;
+       const struct cl_io_slice *scan;
+       int                       result = 0;
 
-        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
-        LINVRNT(cl_page_is_owned(page, io));
-        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
-        LINVRNT(cl_io_invariant(io));
-        ENTRY;
+       LINVRNT(io->ci_type == CIT_READ ||
+               io->ci_type == CIT_FAULT ||
+               io->ci_type == CIT_WRITE);
+       LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
+       LINVRNT(cl_io_invariant(io));
+       ENTRY;
 
-        queue = &io->ci_queue;
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->cio_read_ahead == NULL)
+                       continue;
 
-        cl_2queue_init(queue);
-        /*
-         * ->cio_read_page() methods called in the loop below are supposed to
-         * never block waiting for network (the only subtle point is the
-         * creation of new pages for read-ahead that might result in cache
-         * shrinking, but currently only clean pages are shrunk and this
-         * requires no network io).
-         *
-         * Should this ever starts blocking, retry loop would be needed for
-         * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
-         */
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->cio_read_page != NULL) {
-                        const struct cl_page_slice *slice;
-
-                        slice = cl_io_slice_page(scan, page);
-                        LINVRNT(slice != NULL);
-                        result = scan->cis_iop->cio_read_page(env, scan, slice);
-                        if (result != 0)
-                                break;
-                }
-        }
-       if (result == 0 && queue->c2_qin.pl_nr > 0)
-               result = cl_io_submit_rw(env, io, CRT_READ, queue);
-        /*
-         * Unlock unsent pages in case of error.
-         */
-        cl_page_list_disown(env, io, &queue->c2_qin);
-        cl_2queue_fini(env, queue);
-        RETURN(result);
+               result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
+               if (result != 0)
+                       break;
+       }
+       RETURN(result > 0 ? 0 : result);
 }
-EXPORT_SYMBOL(cl_io_read_page);
+EXPORT_SYMBOL(cl_io_read_ahead);
 
 /**
  * Commit a list of contiguous pages into writeback cache.
@@ -678,14 +588,14 @@ EXPORT_SYMBOL(cl_io_read_page);
  * \see cl_io_operations::cio_commit_async()
  */
 int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
-                       struct cl_page_list *queue, int from, int to,
-                       cl_commit_cbt cb)
+                      struct cl_page_list *queue, int from, int to,
+                      cl_commit_cbt cb)
 {
        const struct cl_io_slice *scan;
        int result = 0;
        ENTRY;
 
-       cl_io_for_each(scan, io) {
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_commit_async == NULL)
                        continue;
                result = scan->cis_iop->cio_commit_async(env, scan, queue,
@@ -714,7 +624,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
        int result = 0;
        ENTRY;
 
-       cl_io_for_each(scan, io) {
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_submit == NULL)
                        continue;
                result = scan->cis_iop->cio_submit(env, scan, crt, queue);
@@ -746,7 +656,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                pg->cp_sync_io = anchor;
        }
 
-       cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
+       cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
        rc = cl_io_submit_rw(env, io, iot, queue);
        if (rc == 0) {
                /*
@@ -790,7 +700,6 @@ int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
         }
         return result;
 }
-EXPORT_SYMBOL(cl_io_cancel);
 
 /**
  * Main io loop.
@@ -813,41 +722,47 @@ EXPORT_SYMBOL(cl_io_cancel);
  */
 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
 {
-        int result   = 0;
+       int result   = 0;
 
-        LINVRNT(cl_io_is_loopable(io));
-        ENTRY;
+       LINVRNT(cl_io_is_loopable(io));
+       ENTRY;
+
+       do {
+               size_t nob;
+
+               io->ci_continue = 0;
+               result = cl_io_iter_init(env, io);
+               if (result == 0) {
+                       nob    = io->ci_nob;
+                       result = cl_io_lock(env, io);
+                       if (result == 0) {
+                               /*
+                                * Notify layers that locks have been taken,
+                                * and do actual i/o.
+                                *
+                                *   - llite: kms, short read;
+                                *   - llite: generic_file_read();
+                                */
+                               result = cl_io_start(env, io);
+                               /*
+                                * Send any remaining pending
+                                * io, etc.
+                                *
+                                *   - llite: ll_rw_stats_tally.
+                                */
+                               cl_io_end(env, io);
+                               cl_io_unlock(env, io);
+                               cl_io_rw_advance(env, io, io->ci_nob - nob);
+                       }
+               }
+               cl_io_iter_fini(env, io);
+       } while (result == 0 && io->ci_continue);
+
+       if (result == -EWOULDBLOCK && io->ci_ndelay) {
+               io->ci_need_restart = 1;
+               result = 0;
+       }
 
-        do {
-                size_t nob;
-
-                io->ci_continue = 0;
-                result = cl_io_iter_init(env, io);
-                if (result == 0) {
-                        nob    = io->ci_nob;
-                        result = cl_io_lock(env, io);
-                        if (result == 0) {
-                                /*
-                                 * Notify layers that locks has been taken,
-                                 * and do actual i/o.
-                                 *
-                                 *   - llite: kms, short read;
-                                 *   - llite: generic_file_read();
-                                 */
-                                result = cl_io_start(env, io);
-                                /*
-                                 * Send any remaining pending
-                                 * io, etc.
-                                 *
-                                 *   - llite: ll_rw_stats_tally.
-                                 */
-                                cl_io_end(env, io);
-                                cl_io_unlock(env, io);
-                                cl_io_rw_advance(env, io, io->ci_nob - nob);
-                        }
-                }
-                cl_io_iter_fini(env, io);
-        } while (result == 0 && io->ci_continue);
        if (result == 0)
                result = io->ci_result;
        RETURN(result < 0 ? result : 0);
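
The -EWOULDBLOCK handling above converts a failed non-blocking attempt into ci_need_restart and reports success, so the caller is expected to rebuild the io and try again. A hedged sketch of that caller-side loop (the real driver is ll_file_io_generic() in llite; vvp_env_thread_io() as the per-env io slot is an assumption here):

	struct cl_io *io;
	bool need_restart;
	ssize_t rc;

again:
	io = vvp_env_thread_io(env);		/* assumed per-env io slot */
	rc = cl_io_rw_init(env, io, iot, pos, count);
	if (rc == 0)
		rc = cl_io_loop(env, io);

	need_restart = io->ci_need_restart;	/* set by cl_io_loop() above */
	cl_io_fini(env, io);
	if (rc == 0 && need_restart)
		goto again;
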
@@ -864,20 +779,20 @@ EXPORT_SYMBOL(cl_io_loop);
  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
  */
 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
-                     struct cl_object *obj,
-                     const struct cl_io_operations *ops)
+                    struct cl_object *obj,
+                    const struct cl_io_operations *ops)
 {
        struct list_head *linkage = &slice->cis_linkage;
 
-        LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
+       LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
                list_empty(linkage));
-        ENTRY;
+       ENTRY;
 
        list_add_tail(linkage, &io->ci_layers);
-        slice->cis_io  = io;
-        slice->cis_obj = obj;
-        slice->cis_iop = ops;
-        EXIT;
+       slice->cis_io  = io;
+       slice->cis_obj = obj;
+       slice->cis_iop = ops;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_io_slice_add);
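
For illustration, this is how a layer typically attaches its slice from its coo_io_init() method; the foo_* names are invented, but the real layers (vvp, lov, osc) follow this shape:

struct foo_io {
	struct cl_io_slice	fio_cl;
	/* per-layer io state would live here */
};

static const struct cl_io_operations foo_io_ops;	/* layer's methods */

static int foo_io_init(const struct lu_env *env,
		       struct cl_object *obj, struct cl_io *io)
{
	struct foo_io *fio = foo_env_io(env);	/* hypothetical lookup */

	cl_io_slice_add(io, &fio->fio_cl, obj, &foo_io_ops);
	return 0;
}
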
 
@@ -1045,30 +960,6 @@ void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
 EXPORT_SYMBOL(cl_page_list_fini);
 
 /**
- * Owns all pages in a queue.
- */
-int cl_page_list_own(const struct lu_env *env,
-                    struct cl_io *io, struct cl_page_list *plist)
-{
-       struct cl_page *page;
-       struct cl_page *temp;
-       int result;
-
-       LINVRNT(plist->pl_owner == current);
-
-       ENTRY;
-       result = 0;
-       cl_page_list_for_each_safe(page, temp, plist) {
-               if (cl_page_own(env, io, page) == 0)
-                       result = result ?: page->cp_error;
-               else
-                       cl_page_list_del(env, plist, page);
-       }
-       RETURN(result);
-}
-EXPORT_SYMBOL(cl_page_list_own);
-
-/**
  * Assumes all pages in a queue.
  */
 void cl_page_list_assume(const struct lu_env *env,
@@ -1081,7 +972,6 @@ void cl_page_list_assume(const struct lu_env *env,
        cl_page_list_for_each(page, plist)
                cl_page_assume(env, io, page);
 }
-EXPORT_SYMBOL(cl_page_list_assume);
 
 /**
  * Discards all pages in a queue.
@@ -1157,7 +1047,6 @@ void cl_2queue_assume(const struct lu_env *env,
         cl_page_list_assume(env, io, &queue->c2_qin);
         cl_page_list_assume(env, io, &queue->c2_qout);
 }
-EXPORT_SYMBOL(cl_2queue_assume);
 
 /**
  * Finalize both page lists of a 2-queue.
@@ -1206,282 +1095,47 @@ void cl_io_print(const struct lu_env *env, void *cookie,
 }
 
 /**
- * Adds request slice to the compound request.
- *
- * This is called by cl_device_operations::cdo_req_init() methods to add a
- * per-layer state to the request. New state is added at the end of
- * cl_req::crq_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
- */
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
-                      struct cl_device *dev,
-                      const struct cl_req_operations *ops)
-{
-        ENTRY;
-       list_add_tail(&slice->crs_linkage, &req->crq_layers);
-        slice->crs_dev = dev;
-        slice->crs_ops = ops;
-        slice->crs_req = req;
-        EXIT;
-}
-EXPORT_SYMBOL(cl_req_slice_add);
-
-static void cl_req_free(const struct lu_env *env, struct cl_req *req)
-{
-        unsigned i;
-
-       LASSERT(list_empty(&req->crq_pages));
-        LASSERT(req->crq_nrpages == 0);
-       LINVRNT(list_empty(&req->crq_layers));
-        LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
-        ENTRY;
-
-        if (req->crq_o != NULL) {
-                for (i = 0; i < req->crq_nrobjs; ++i) {
-                        struct cl_object *obj = req->crq_o[i].ro_obj;
-                       if (obj != NULL) {
-                               lu_object_ref_del_at(&obj->co_lu,
-                                                    &req->crq_o[i].ro_obj_ref,
-                                                    "cl_req", req);
-                               cl_object_put(env, obj);
-                       }
-                }
-                OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
-        }
-        OBD_FREE_PTR(req);
-        EXIT;
-}
-
-static int cl_req_init(const struct lu_env *env, struct cl_req *req,
-                       struct cl_page *page)
-{
-       struct cl_device     *dev;
-       struct cl_page_slice *slice;
-       int result;
-
-       ENTRY;
-       result = 0;
-       list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-               dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
-               if (dev->cd_ops->cdo_req_init != NULL) {
-                       result = dev->cd_ops->cdo_req_init(env,
-                                       dev, req);
-                       if (result != 0)
-                               break;
-               }
-       }
-       RETURN(result);
-}
-
-/**
- * Invokes per-request transfer completion call-backs
- * (cl_req_operations::cro_completion()) bottom-to-top.
- */
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
-{
-        struct cl_req_slice *slice;
-
-        ENTRY;
-        /*
-         * for the lack of list_for_each_entry_reverse_safe()...
-         */
-       while (!list_empty(&req->crq_layers)) {
-               slice = list_entry(req->crq_layers.prev,
-                                  struct cl_req_slice, crs_linkage);
-               list_del_init(&slice->crs_linkage);
-                if (slice->crs_ops->cro_completion != NULL)
-                        slice->crs_ops->cro_completion(env, slice, rc);
-        }
-        cl_req_free(env, req);
-        EXIT;
-}
-EXPORT_SYMBOL(cl_req_completion);
-
-/**
- * Allocates new transfer request.
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
-                            enum cl_req_type crt, int nr_objects)
-{
-        struct cl_req *req;
-
-        LINVRNT(nr_objects > 0);
-        ENTRY;
-
-        OBD_ALLOC_PTR(req);
-        if (req != NULL) {
-                int result;
-
-               req->crq_type = crt;
-               INIT_LIST_HEAD(&req->crq_pages);
-               INIT_LIST_HEAD(&req->crq_layers);
-
-                OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
-                if (req->crq_o != NULL) {
-                        req->crq_nrobjs = nr_objects;
-                        result = cl_req_init(env, req, page);
-                } else
-                        result = -ENOMEM;
-                if (result != 0) {
-                        cl_req_completion(env, req, result);
-                        req = ERR_PTR(result);
-                }
-        } else
-                req = ERR_PTR(-ENOMEM);
-        RETURN(req);
-}
-EXPORT_SYMBOL(cl_req_alloc);
-
-/**
- * Adds a page to a request.
- */
-void cl_req_page_add(const struct lu_env *env,
-                     struct cl_req *req, struct cl_page *page)
-{
-       struct cl_object  *obj;
-       struct cl_req_obj *rqo;
-       unsigned int i;
-
-       ENTRY;
-
-       LASSERT(list_empty(&page->cp_flight));
-        LASSERT(page->cp_req == NULL);
-
-        CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
-                      req, req->crq_type, req->crq_nrpages);
-
-       list_add_tail(&page->cp_flight, &req->crq_pages);
-        ++req->crq_nrpages;
-        page->cp_req = req;
-        obj = cl_object_top(page->cp_obj);
-        for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
-                if (rqo->ro_obj == NULL) {
-                        rqo->ro_obj = obj;
-                        cl_object_get(obj);
-                       lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
-                                            "cl_req", req);
-                       break;
-               }
-       }
-       LASSERT(i < req->crq_nrobjs);
-       EXIT;
-}
-EXPORT_SYMBOL(cl_req_page_add);
-
-/**
- * Removes a page from a request.
- */
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
-{
-       struct cl_req *req = page->cp_req;
-
-       ENTRY;
-
-       LASSERT(!list_empty(&page->cp_flight));
-       LASSERT(req->crq_nrpages > 0);
-
-       list_del_init(&page->cp_flight);
-       --req->crq_nrpages;
-       page->cp_req = NULL;
-       EXIT;
-}
-EXPORT_SYMBOL(cl_req_page_done);
-
-/**
- * Notifies layers that request is about to depart by calling
- * cl_req_operations::cro_prep() top-to-bottom.
- */
-int cl_req_prep(const struct lu_env *env, struct cl_req *req)
-{
-       unsigned int i;
-        int result;
-        const struct cl_req_slice *slice;
-
-        ENTRY;
-        /*
-         * Check that the caller of cl_req_alloc() didn't lie about the number
-         * of objects.
-         */
-        for (i = 0; i < req->crq_nrobjs; ++i)
-                LASSERT(req->crq_o[i].ro_obj != NULL);
-
-        result = 0;
-       list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
-                if (slice->crs_ops->cro_prep != NULL) {
-                        result = slice->crs_ops->cro_prep(env, slice);
-                        if (result != 0)
-                                break;
-                }
-        }
-        RETURN(result);
-}
-EXPORT_SYMBOL(cl_req_prep);
-
-/**
  * Fills in attributes that are passed to server together with transfer. Only
  * attributes from \a flags may be touched. This can be called multiple times
  * for the same request.
  */
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
-                     struct cl_req_attr *attr, obd_valid flags)
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+                    struct cl_req_attr *attr)
 {
-        const struct cl_req_slice *slice;
-        struct cl_page            *page;
-       unsigned int i;
-
-       LASSERT(!list_empty(&req->crq_pages));
-        ENTRY;
+       struct cl_object *scan;
+       ENTRY;
 
-        /* Take any page to use as a model. */
-       page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
-
-        for (i = 0; i < req->crq_nrobjs; ++i) {
-               list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
-                        const struct cl_page_slice *scan;
-                        const struct cl_object     *obj;
-
-                        scan = cl_page_at(page,
-                                          slice->crs_dev->cd_lu_dev.ld_type);
-                        LASSERT(scan != NULL);
-                        obj = scan->cpl_obj;
-                        if (slice->crs_ops->cro_attr_set != NULL)
-                                slice->crs_ops->cro_attr_set(env, slice, obj,
-                                                             attr + i, flags);
-                }
-        }
-        EXIT;
+       cl_object_for_each(scan, obj) {
+               if (scan->co_ops->coo_req_attr_set != NULL)
+                       scan->co_ops->coo_req_attr_set(env, scan, attr);
+       }
+       EXIT;
 }
 EXPORT_SYMBOL(cl_req_attr_set);
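
The rewritten cl_req_attr_set() walks the object's layer stack instead of per-request slices. The cl_object_for_each() iterator is not shown in this diff; its assumed shape, based on the standard lu_object layering, is roughly:

/* Assumed shape of cl_object_for_each() (defined in cl_object.h):
 * cl_object layers hang off the shared lu_object_header list. */
#define cl_object_for_each(slice, obj)					  \
	list_for_each_entry((slice),					  \
			    &(obj)->co_lu.lo_header->loh_layers,	  \
			    co_lu.lo_linkage)
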
 
-/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
- * wait for the IO to finish. */
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
-{
-       wake_up_all(&anchor->csi_waitq);
-
-       /* it's safe to nuke or reuse anchor now */
-       atomic_set(&anchor->csi_barrier, 0);
-}
-EXPORT_SYMBOL(cl_sync_io_end);
-
 /**
- * Initialize synchronous io wait anchor
+ * Initialize synchronous io wait \a anchor for \a nr pages with optional
+ * \a end handler.
+ * \param anchor owned by caller, initialized here.
+ * \param nr number of pages initially pending in sync.
+ * \param end optional callback on sync_io completion, which can be used
+ *  to trigger erasure coding, integrity, dedupe, or similar operations.
+ * \a end is called with the anchor->csi_waitq.lock spinlock held.
  */
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
-                    void (*end)(const struct lu_env *, struct cl_sync_io *))
+void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
+                           struct cl_dio_aio *aio, cl_sync_io_end_t *end)
 {
        ENTRY;
        memset(anchor, 0, sizeof(*anchor));
        init_waitqueue_head(&anchor->csi_waitq);
        atomic_set(&anchor->csi_sync_nr, nr);
-       atomic_set(&anchor->csi_barrier, nr > 0);
        anchor->csi_sync_rc = 0;
        anchor->csi_end_io = end;
-       LASSERT(end != NULL);
+       anchor->csi_aio = aio;
        EXIT;
 }
-EXPORT_SYMBOL(cl_sync_io_init);
+EXPORT_SYMBOL(cl_sync_io_init_notify);
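
Callers that need neither an aio nor a completion callback — like the cl_io_submit_sync() hunk above, which now passes only an anchor and a count — presumably go through a thin inline wrapper:

/* Assumed wrapper (cl_object.h) matching the two-argument
 * cl_sync_io_init() call seen in cl_io_submit_sync() above. */
static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
{
	cl_sync_io_init_notify(anchor, nr, NULL, NULL);
}
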
 
 /**
  * Wait until all IO completes. Transfer completion routine has to call
@@ -1490,37 +1144,85 @@ EXPORT_SYMBOL(cl_sync_io_init);
 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
                    long timeout)
 {
-       struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
-                                                 NULL, NULL, NULL);
-       int rc;
+       int rc = 0;
        ENTRY;
 
        LASSERT(timeout >= 0);
 
-       rc = l_wait_event(anchor->csi_waitq,
-                         atomic_read(&anchor->csi_sync_nr) == 0,
-                         &lwi);
-       if (rc < 0) {
+       if (timeout > 0 &&
+           wait_event_idle_timeout(anchor->csi_waitq,
+                                   atomic_read(&anchor->csi_sync_nr) == 0,
+                                   cfs_time_seconds(timeout)) == 0) {
+               rc = -ETIMEDOUT;
                CERROR("IO failed: %d, still wait for %d remaining entries\n",
                       rc, atomic_read(&anchor->csi_sync_nr));
+       }
 
-               lwi = (struct l_wait_info) { 0 };
-               (void)l_wait_event(anchor->csi_waitq,
-                                  atomic_read(&anchor->csi_sync_nr) == 0,
-                                  &lwi);
-       } else {
+       wait_event_idle(anchor->csi_waitq,
+                       atomic_read(&anchor->csi_sync_nr) == 0);
+       if (!rc)
                rc = anchor->csi_sync_rc;
-       }
+
+       /* We take the lock to ensure that cl_sync_io_note() has finished */
+       spin_lock(&anchor->csi_waitq.lock);
        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
+       spin_unlock(&anchor->csi_waitq.lock);
 
-       /* wait until cl_sync_io_note() has done wakeup */
-       while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
-               cpu_relax();
-       }
        RETURN(rc);
 }
 EXPORT_SYMBOL(cl_sync_io_wait);
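
Putting the anchor API together, the synchronous pattern mirrors cl_io_submit_sync() above: one pending count per page, each page completion calls cl_sync_io_note(), and the submitter blocks in cl_sync_io_wait(). Sketch only:

	struct cl_sync_io anchor;
	struct cl_page *pg;
	int rc;

	cl_sync_io_init(&anchor, queue->c2_qin.pl_nr);	/* wrapper, see above */
	cl_page_list_for_each(pg, &queue->c2_qin)
		pg->cp_sync_io = &anchor;	/* as in cl_io_submit_sync() */

	rc = cl_io_submit_rw(env, io, iot, queue);
	if (rc == 0)
		rc = cl_sync_io_wait(env, &anchor, 0);	/* 0: no timeout */
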
 
+#ifndef HAVE_AIO_COMPLETE
+static inline void aio_complete(struct kiocb *iocb, ssize_t res, ssize_t res2)
+{
+       if (iocb->ki_complete)
+               iocb->ki_complete(iocb, res, res2);
+}
+#endif
+
+static void cl_aio_end(const struct lu_env *env, struct cl_sync_io *anchor)
+{
+       struct cl_dio_aio *aio = container_of(anchor, typeof(*aio), cda_sync);
+       ssize_t ret = anchor->csi_sync_rc;
+
+       ENTRY;
+
+       /* release pages */
+       while (aio->cda_pages.pl_nr > 0) {
+               struct cl_page *page = cl_page_list_first(&aio->cda_pages);
+
+               cl_page_get(page);
+               cl_page_list_del(env, &aio->cda_pages, page);
+               cl_page_delete(env, page);
+               cl_page_put(env, page);
+       }
+
+       if (!is_sync_kiocb(aio->cda_iocb))
+               aio_complete(aio->cda_iocb, ret ?: aio->cda_bytes, 0);
+
+       EXIT;
+}
+
+struct cl_dio_aio *cl_aio_alloc(struct kiocb *iocb)
+{
+       struct cl_dio_aio *aio;
+
+       OBD_SLAB_ALLOC_PTR_GFP(aio, cl_dio_aio_kmem, GFP_NOFS);
+       if (aio != NULL) {
+               /*
+                * Hold one ref so that it won't be released until
+                * every page has been added.
+                */
+               cl_sync_io_init_notify(&aio->cda_sync, 1, is_sync_kiocb(iocb) ?
+                                      NULL : aio, cl_aio_end);
+               cl_page_list_init(&aio->cda_pages);
+               aio->cda_iocb = iocb;
+       }
+       return aio;
+}
+EXPORT_SYMBOL(cl_aio_alloc);
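
A hedged sketch of the expected caller side in the direct IO path (the real caller is llite's ll_direct_IO(); submit_dio_pages() and cl_aio_free() are assumed helper names): allocate from the slab, queue pages against &aio->cda_sync, then drop the initial reference taken in cl_aio_alloc(). For an async kiocb the slab object is freed from cl_sync_io_note(); for a sync kiocb csi_aio stays NULL, so the caller must wait and free it itself:

	struct cl_dio_aio *aio = cl_aio_alloc(iocb);
	ssize_t rc;

	if (aio == NULL)
		return -ENOMEM;

	rc = submit_dio_pages(env, io, aio);	/* hypothetical helper */

	/* drop the "hold one ref" taken in cl_aio_alloc() */
	cl_sync_io_note(env, &aio->cda_sync, rc);

	if (is_sync_kiocb(iocb)) {
		ssize_t rc2 = cl_sync_io_wait(env, &aio->cda_sync, 0);

		if (rc == 0)
			rc = rc2 ?: aio->cda_bytes;
		cl_aio_free(aio);	/* assumed: frees via cl_dio_aio_kmem */
	} else {
		rc = -EIOCBQUEUED;	/* cl_aio_end()/aio_complete() later */
	}
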
+
 /**
  * Indicate that transfer of a single page completed.
  */
@@ -1536,10 +1238,34 @@ void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
         * IO.
         */
        LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
-       if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
-               LASSERT(anchor->csi_end_io != NULL);
-               anchor->csi_end_io(env, anchor);
-               /* Can't access anchor any more */
+       if (atomic_dec_and_lock(&anchor->csi_sync_nr,
+                               &anchor->csi_waitq.lock)) {
+               struct cl_dio_aio *aio = NULL;
+
+               cl_sync_io_end_t *end_io = anchor->csi_end_io;
+
+               /*
+                * Holding the lock across both the decrement and
+                * the wakeup ensures cl_sync_io_wait() doesn't return
+                * before the wakeup has finished. Once cl_sync_io_wait()
+                * returns, the owner is free to reclaim anchor
+                * immediately, so its contents must not be touched
+                * after the wakeup.
+                */
+               wake_up_all_locked(&anchor->csi_waitq);
+               if (end_io)
+                       end_io(env, anchor);
+               if (anchor->csi_aio)
+                       aio = anchor->csi_aio;
+
+               spin_unlock(&anchor->csi_waitq.lock);
+
+               /*
+                * If anchor->csi_aio is set, we are responsible for freeing
+                * memory here rather than when cl_sync_io_wait() completes.
+                */
+               if (aio)
+                       OBD_SLAB_FREE_PTR(aio, cl_dio_aio_kmem);
        }
        EXIT;
 }