LU-1032 build: Add Lustre DKMS spec file

diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c
index b7ddaed..1887381 100644
--- a/lustre/obdclass/cl_io.c
+++ b/lustre/obdclass/cl_io.c
@@ -724,41 +724,6 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
 }
 
 /**
- * True iff \a page is within \a io range.
- */
-static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
-{
-        int     result = 1;
-        loff_t  start;
-        loff_t  end;
-        pgoff_t idx;
-
-        idx = page->cp_index;
-        switch (io->ci_type) {
-        case CIT_READ:
-        case CIT_WRITE:
-                /*
-                 * check that [start, end) and [pos, pos + count) extents
-                 * overlap.
-                 */
-                if (!cl_io_is_append(io)) {
-                        const struct cl_io_rw_common *crw = &(io->u.ci_rw);
-                        start = cl_offset(page->cp_obj, idx);
-                        end   = cl_offset(page->cp_obj, idx + 1);
-                        result = crw->crw_pos < end &&
-                                 start < crw->crw_pos + crw->crw_count;
-                }
-                break;
-        case CIT_FAULT:
-                result = io->u.ci_fault.ft_index == idx;
-                break;
-        default:
-                LBUG();
-        }
-        return result;
-}
-
-/**
  * Called by read io, when page has to be read from the server.
  *
  * \see cl_io_operations::cio_read_page()
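
For reference, the extent check that the removed cl_page_in_io() performed for
CIT_READ/CIT_WRITE reduces to a standard half-open interval overlap test. A
minimal standalone sketch (userspace C; the struct-free parameters and the
page-shift constant stand in for cl_offset() and are illustrative, not from
the patch):

#include <stdbool.h>
#include <stdint.h>

#define MY_PAGE_SHIFT 12	/* stands in for cl_offset()'s conversion */

/* does page 'idx' intersect the io extent [pos, pos + count)? */
static bool page_overlaps_io(uint64_t idx, int64_t pos, int64_t count)
{
	int64_t start = (int64_t)(idx << MY_PAGE_SHIFT);
	int64_t end   = (int64_t)((idx + 1) << MY_PAGE_SHIFT);

	/* half-open intervals overlap iff each one starts
	 * before the other one ends */
	return pos < end && start < pos + count;
}
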
@@ -773,7 +738,6 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
         LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
         LINVRNT(cl_page_is_owned(page, io));
         LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
-        LINVRNT(cl_page_in_io(page, io));
         LINVRNT(cl_io_invariant(io));
         ENTRY;
 
@@ -801,7 +765,7 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
                                 break;
                 }
         }
-        if (result == 0)
+       if (result == 0 && queue->c2_qin.pl_nr > 0)
                result = cl_io_submit_rw(env, io, CRT_READ, queue);
         /*
          * Unlock unsent pages in case of error.
@@ -813,79 +777,30 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
 EXPORT_SYMBOL(cl_io_read_page);
 
 /**
- * Called by write io to prepare page to receive data from user buffer.
+ * Commit a list of contiguous pages into writeback cache.
  *
- * \see cl_io_operations::cio_prepare_write()
+ * \returns 0 if all pages were committed, or errcode if an error occurred.
+ * \see cl_io_operations::cio_commit_async()
  */
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
-                        struct cl_page *page, unsigned from, unsigned to)
+int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
+                       struct cl_page_list *queue, int from, int to,
+                       cl_commit_cbt cb)
 {
-        const struct cl_io_slice *scan;
-        int result = 0;
-
-        LINVRNT(io->ci_type == CIT_WRITE);
-        LINVRNT(cl_page_is_owned(page, io));
-        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
-        LINVRNT(cl_io_invariant(io));
-        LASSERT(cl_page_in_io(page, io));
-        ENTRY;
-
-        cl_io_for_each_reverse(scan, io) {
-                if (scan->cis_iop->cio_prepare_write != NULL) {
-                        const struct cl_page_slice *slice;
-
-                        slice = cl_io_slice_page(scan, page);
-                        result = scan->cis_iop->cio_prepare_write(env, scan,
-                                                                  slice,
-                                                                  from, to);
-                        if (result != 0)
-                                break;
-                }
-        }
-        RETURN(result);
-}
-EXPORT_SYMBOL(cl_io_prepare_write);
-
-/**
- * Called by write io after user data were copied into a page.
- *
- * \see cl_io_operations::cio_commit_write()
- */
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
-                       struct cl_page *page, unsigned from, unsigned to)
-{
-        const struct cl_io_slice *scan;
-        int result = 0;
-
-        LINVRNT(io->ci_type == CIT_WRITE);
-        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
-        LINVRNT(cl_io_invariant(io));
-        /*
-         * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
-         * already called cl_page_cache_add(), moving page into CPS_CACHED
-         * state. Better (and more general) way of dealing with such situation
-         * is needed.
-         */
-        LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
-        LASSERT(cl_page_in_io(page, io));
-        ENTRY;
-
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->cio_commit_write != NULL) {
-                        const struct cl_page_slice *slice;
+       const struct cl_io_slice *scan;
+       int result = 0;
+       ENTRY;
 
-                        slice = cl_io_slice_page(scan, page);
-                        result = scan->cis_iop->cio_commit_write(env, scan,
-                                                                 slice,
-                                                                 from, to);
-                        if (result != 0)
-                                break;
-                }
-        }
-        LINVRNT(result <= 0);
-        RETURN(result);
+       cl_io_for_each(scan, io) {
+               if (scan->cis_iop->cio_commit_async == NULL)
+                       continue;
+               result = scan->cis_iop->cio_commit_async(env, scan, queue,
+                                                        from, to, cb);
+               if (result != 0)
+                       break;
+       }
+       RETURN(result);
 }
-EXPORT_SYMBOL(cl_io_commit_write);
+EXPORT_SYMBOL(cl_io_commit_async);
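
A hedged caller-side sketch of the new batched-commit entry point: how a write
path might gather dirty pages into a cl_page_list and flush them through
cl_io_commit_async(). The helper names and the callback body are illustrative
assumptions, not part of this patch; the callback signature follows the
cl_commit_cbt parameter introduced above:

/* illustrative callback: invoked per page once it has been committed */
static void my_page_committed(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page)
{
	/* e.g. mark the backing VM page dirty and release ownership */
}

/* illustrative helper: commit whatever has been queued so far */
static int my_commit_dirty(const struct lu_env *env, struct cl_io *io,
			   struct cl_page_list *queue, int from, int to)
{
	if (queue->pl_nr == 0)		/* nothing queued, nothing to do */
		return 0;
	/* 'from'/'to' bound the valid bytes of the first/last page */
	return cl_io_commit_async(env, io, queue, from, to,
				  my_page_committed);
}
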
 
 /**
  * Submits a list of pages for immediate io.
@@ -900,25 +815,22 @@ EXPORT_SYMBOL(cl_io_commit_write);
 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
                    enum cl_req_type crt, struct cl_2queue *queue)
 {
-        const struct cl_io_slice *scan;
-        int result = 0;
-
-        LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
-        ENTRY;
+       const struct cl_io_slice *scan;
+       int result = 0;
+       ENTRY;
 
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->req_op[crt].cio_submit == NULL)
-                        continue;
-                result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
-                                                              queue);
-                if (result != 0)
-                        break;
-        }
-        /*
-         * If ->cio_submit() failed, no pages were sent.
-         */
-        LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
-        RETURN(result);
+       cl_io_for_each(scan, io) {
+               if (scan->cis_iop->cio_submit == NULL)
+                       continue;
+               result = scan->cis_iop->cio_submit(env, scan, crt, queue);
+               if (result != 0)
+                       break;
+       }
+       /*
+        * If ->cio_submit() failed, no pages were sent.
+        */
+       LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_io_submit_rw);
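
The usual consumer pairs cl_io_submit_rw() with a cl_sync_io anchor to wait
for completion. A condensed sketch of that pattern (compare
cl_io_submit_sync() in this file; details such as attaching the anchor to each
queued page are omitted, so this is illustrative rather than verbatim):

static int my_submit_and_wait(const struct lu_env *env, struct cl_io *io,
			      enum cl_req_type crt, struct cl_2queue *queue,
			      long timeout)
{
	struct cl_sync_io anchor;
	int rc;

	/* expect one completion per queued page */
	cl_sync_io_init(&anchor, queue->c2_qin.pl_nr);
	rc = cl_io_submit_rw(env, io, crt, queue);
	if (rc == 0)
		/* submitted pages moved to c2_qout; wait for them */
		rc = cl_sync_io_wait(env, io, &queue->c2_qout, &anchor,
				     timeout);
	return rc;
}
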
 
@@ -978,7 +890,6 @@ int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
         cl_page_list_for_each(page, queue) {
                 int rc;
 
-                LINVRNT(cl_page_in_io(page, io));
                 rc = cl_page_cancel(env, page);
                 result = result ?: rc;
         }
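
The `result = result ?: rc;` accumulator uses the GNU `?:` extension to keep
the first non-zero error while still cancelling every remaining page; spelled
out, it is equivalent to:

		if (result == 0)
			result = rc;
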
@@ -1154,6 +1065,26 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
 EXPORT_SYMBOL(cl_page_list_move);
 
 /**
+ * Moves a page from one page list to the head of another list.
+ */
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+                           struct cl_page *page)
+{
+       LASSERT(src->pl_nr > 0);
+       LINVRNT(dst->pl_owner == current);
+       LINVRNT(src->pl_owner == current);
+
+       ENTRY;
+       cfs_list_move(&page->cp_batch, &dst->pl_pages);
+       --src->pl_nr;
+       ++dst->pl_nr;
+       lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+                       src, dst);
+       EXIT;
+}
+EXPORT_SYMBOL(cl_page_list_move_head);
+
+/**
  * Splice the cl_page_list, just as list_splice() does for plain list heads.
  */
 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
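
Illustrative only: draining one list onto the head of another with the new
cl_page_list_move_head() helper, assuming the cfs_list accessors visible
elsewhere in this file and locally owned lists 'src' and 'dst'. Because each
successive page is placed at the head, the relative order is reversed; both
lists' pl_nr counts stay consistent throughout:

	struct cl_page *page;

	/* move every page of 'src' to the head of 'dst' (reverses order) */
	while (src.pl_nr > 0) {
		page = cfs_list_entry(src.pl_pages.next,
				      struct cl_page, cp_batch);
		cl_page_list_move_head(&dst, &src, page);
	}
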
@@ -1237,7 +1168,6 @@ int cl_page_list_own(const struct lu_env *env,
 {
        struct cl_page *page;
        struct cl_page *temp;
-       pgoff_t index = 0;
        int result;
 
        LINVRNT(plist->pl_owner == current);
@@ -1245,8 +1175,6 @@ int cl_page_list_own(const struct lu_env *env,
        ENTRY;
        result = 0;
        cl_page_list_for_each_safe(page, temp, plist) {
-               LASSERT(index <= page->cp_index);
-               index = page->cp_index;
                if (cl_page_own(env, io, page) == 0)
                        result = result ?: page->cp_error;
                else
@@ -1374,7 +1302,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
 /**
  * Returns top-level io.
  *
- * \see cl_object_top(), cl_page_top().
+ * \see cl_object_top()
  */
 struct cl_io *cl_io_top(struct cl_io *io)
 {
@@ -1444,26 +1372,22 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
                        struct cl_page *page)
 {
-        struct cl_device     *dev;
-        struct cl_page_slice *slice;
-        int result;
+       struct cl_device     *dev;
+       struct cl_page_slice *slice;
+       int result;
 
-        ENTRY;
-        result = 0;
-        page = cl_page_top(page);
-        do {
-                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-                        dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
-                        if (dev->cd_ops->cdo_req_init != NULL) {
-                                result = dev->cd_ops->cdo_req_init(env,
-                                                                   dev, req);
-                                if (result != 0)
-                                        break;
-                        }
-                }
-                page = page->cp_child;
-        } while (page != NULL && result == 0);
-        RETURN(result);
+       ENTRY;
+       result = 0;
+       cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+               dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
+               if (dev->cd_ops->cdo_req_init != NULL) {
+                       result = dev->cd_ops->cdo_req_init(env, dev, req);
+                       if (result != 0)
+                               break;
+               }
+       }
+       RETURN(result);
 }
 
 /**
@@ -1530,12 +1454,11 @@ EXPORT_SYMBOL(cl_req_alloc);
 void cl_req_page_add(const struct lu_env *env,
                      struct cl_req *req, struct cl_page *page)
 {
-        struct cl_object  *obj;
-        struct cl_req_obj *rqo;
-        int i;
+       struct cl_object  *obj;
+       struct cl_req_obj *rqo;
+       int i;
 
-        ENTRY;
-        page = cl_page_top(page);
+       ENTRY;
 
         LASSERT(cfs_list_empty(&page->cp_flight));
         LASSERT(page->cp_req == NULL);
@@ -1553,11 +1476,11 @@ void cl_req_page_add(const struct lu_env *env,
                         cl_object_get(obj);
                        lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
                                             "cl_req", req);
-                        break;
-                }
-        }
-        LASSERT(i < req->crq_nrobjs);
-        EXIT;
+                       break;
+               }
+       }
+       LASSERT(i < req->crq_nrobjs);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_req_page_add);
 
@@ -1566,18 +1489,17 @@ EXPORT_SYMBOL(cl_req_page_add);
  */
 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_req *req = page->cp_req;
+       struct cl_req *req = page->cp_req;
 
-        ENTRY;
-        page = cl_page_top(page);
+       ENTRY;
 
-        LASSERT(!cfs_list_empty(&page->cp_flight));
-        LASSERT(req->crq_nrpages > 0);
+       LASSERT(!cfs_list_empty(&page->cp_flight));
+       LASSERT(req->crq_nrpages > 0);
 
-        cfs_list_del_init(&page->cp_flight);
-        --req->crq_nrpages;
-        page->cp_req = NULL;
-        EXIT;
+       cfs_list_del_init(&page->cp_flight);
+       --req->crq_nrpages;
+       page->cp_req = NULL;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_req_page_done);
 
@@ -1662,8 +1584,8 @@ void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
 {
        ENTRY;
        init_waitqueue_head(&anchor->csi_waitq);
-       cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
-       cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
+       atomic_set(&anchor->csi_sync_nr, nrpages);
+       atomic_set(&anchor->csi_barrier, nrpages > 0);
        anchor->csi_sync_rc = 0;
        EXIT;
 }
@@ -1685,27 +1607,27 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
         LASSERT(timeout >= 0);
 
         rc = l_wait_event(anchor->csi_waitq,
-                          cfs_atomic_read(&anchor->csi_sync_nr) == 0,
+                         atomic_read(&anchor->csi_sync_nr) == 0,
                           &lwi);
         if (rc < 0) {
                 CERROR("SYNC IO failed with error: %d, try to cancel "
                        "%d remaining pages\n",
-                       rc, cfs_atomic_read(&anchor->csi_sync_nr));
+                      rc, atomic_read(&anchor->csi_sync_nr));
 
                 (void)cl_io_cancel(env, io, queue);
 
                 lwi = (struct l_wait_info) { 0 };
                 (void)l_wait_event(anchor->csi_waitq,
-                                   cfs_atomic_read(&anchor->csi_sync_nr) == 0,
+                                  atomic_read(&anchor->csi_sync_nr) == 0,
                                    &lwi);
         } else {
                 rc = anchor->csi_sync_rc;
         }
-        LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
+       LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
         cl_page_list_assume(env, io, queue);
 
        /* wait until cl_sync_io_note() has done wakeup */
-       while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
+       while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
 #ifdef __KERNEL__
                cpu_relax();
 #endif
@@ -1729,11 +1651,11 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
          * ->{prepare,commit}_write(). Completion is used to signal the end of
          * IO.
          */
-        LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
-       if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
+       LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
+       if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
                wake_up_all(&anchor->csi_waitq);
                /* it's safe to nuke or reuse anchor now */
-               cfs_atomic_set(&anchor->csi_barrier, 0);
+               atomic_set(&anchor->csi_barrier, 0);
        }
        EXIT;
 }
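
The csi_sync_nr/csi_barrier handshake above can be illustrated with a
userspace analogue (C11 atomics, illustrative only): the completer wakes
waiters first and clears the barrier last, so once the waiter sees the barrier
drop to zero it knows the completer is done touching the anchor and the anchor
may be freed or reused.

#include <stdatomic.h>

struct sync_anchor {
	atomic_int nr;      /* pages still in flight (csi_sync_nr) */
	atomic_int barrier; /* completer still touching anchor (csi_barrier) */
};

static void anchor_init(struct sync_anchor *a, int nrpages)
{
	atomic_store(&a->nr, nrpages);
	atomic_store(&a->barrier, nrpages > 0);
}

static void anchor_note(struct sync_anchor *a)
{
	/* last completion wakes waiters, then releases the barrier */
	if (atomic_fetch_sub(&a->nr, 1) == 1) {
		/* wake_up_all(&waitq) would go here */
		atomic_store(&a->barrier, 0); /* safe to nuke/reuse anchor */
	}
}

static void anchor_wait_reuse(struct sync_anchor *a)
{
	while (atomic_load(&a->nr) != 0)
		; /* l_wait_event() in the kernel code */
	while (atomic_load(&a->barrier) != 0)
		; /* cpu_relax() spin until the completer is done */
}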