LU-848 clio: page writeback support
diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c
index 8cc0839..cf18605 100644
--- a/lustre/obdclass/cl_io.c
+++ b/lustre/obdclass/cl_io.c
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -60,9 +60,9 @@
  */
 
 #define cl_io_for_each(slice, io) \
-        list_for_each_entry((slice), &io->ci_layers, cis_linkage)
+        cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
 #define cl_io_for_each_reverse(slice, io)                 \
-        list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
+        cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
 
 static inline int cl_io_type_is_valid(enum cl_io_type type)
 {
@@ -75,15 +75,6 @@ static inline int cl_io_is_loopable(const struct cl_io *io)
 }
 
 /**
- * True, iff \a io is a sendfile().
- */
-int cl_io_is_sendfile(const struct cl_io *io)
-{
-        return io->ci_type == CIT_READ && io->u.ci_rd.rd_is_sendfile;
-}
-EXPORT_SYMBOL(cl_io_is_sendfile);
-
-/**
  * Returns true iff there is an IO ongoing in the given environment.
  */
 int cl_io_is_going(const struct lu_env *env)
@@ -123,10 +114,10 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
         LINVRNT(cl_io_invariant(io));
         ENTRY;
 
-        while (!list_empty(&io->ci_layers)) {
+        while (!cfs_list_empty(&io->ci_layers)) {
                 slice = container_of(io->ci_layers.next, struct cl_io_slice,
                                      cis_linkage);
-                list_del_init(&slice->cis_linkage);
+                cfs_list_del_init(&slice->cis_linkage);
                 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
                         slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
                 /*
@@ -228,7 +219,7 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
 
         LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
                          "io range: %u ["LPU64", "LPU64") %u %u\n",
-                         iot, (__u64)pos, (__u64)pos + count),
+                         iot, (__u64)pos, (__u64)pos + count,
                          io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
         io->u.ci_rw.crw_pos    = pos;
         io->u.ci_rw.crw_count  = count;
@@ -242,13 +233,41 @@ cl_lock_descr_fid(const struct cl_lock_descr *descr)
         return lu_object_fid(&descr->cld_obj->co_lu);
 }
 
-static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
-                             const struct cl_lock_descr *d1)
+static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
+                              const struct cl_lock_descr *d1)
 {
         return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
                 __diff_normalize(d0->cld_start, d1->cld_start);
 }
 
+static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
+                             const struct cl_lock_descr *d1)
+{
+        int ret;
+
+        ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
+        if (ret)
+                return ret;
+        if (d0->cld_end < d1->cld_start)
+                return -1;
+        if (d0->cld_start > d1->cld_end)
+                return 1;
+        return 0;
+}
+
+static void cl_lock_descr_merge(struct cl_lock_descr *d0,
+                                const struct cl_lock_descr *d1)
+{
+        d0->cld_start = min(d0->cld_start, d1->cld_start);
+        d0->cld_end = max(d0->cld_end, d1->cld_end);
+
+        if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
+                d0->cld_mode = CLM_WRITE;
+
+        if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
+                d0->cld_mode = CLM_GROUP;
+}
+
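The split above separates two jobs: cl_lock_descr_sort() keeps the old (fid, start-offset) ordering used to sort the todo queue, while the new cl_lock_descr_cmp() only decides whether two extents on the same object overlap, so cl_lock_descr_merge() can fold one request into the other, widening the extent and escalating the lock mode. A minimal standalone sketch of that compare-and-merge step, using simplified stand-in types rather than the real struct cl_lock_descr:

#include <stdio.h>

enum lock_mode { MODE_READ, MODE_WRITE, MODE_GROUP };

struct descr {
        unsigned long start;
        unsigned long end;      /* inclusive, like cld_end */
        enum lock_mode mode;
};

/* 0 iff the two extents overlap and are candidates for merging */
static int descr_cmp(const struct descr *d0, const struct descr *d1)
{
        if (d0->end < d1->start)
                return -1;
        if (d0->start > d1->end)
                return +1;
        return 0;
}

/* widen d0 to cover d1, escalating the mode as the patch does */
static void descr_merge(struct descr *d0, const struct descr *d1)
{
        if (d1->start < d0->start)
                d0->start = d1->start;
        if (d1->end > d0->end)
                d0->end = d1->end;
        if (d1->mode == MODE_WRITE && d0->mode != MODE_WRITE)
                d0->mode = MODE_WRITE;
        if (d1->mode == MODE_GROUP && d0->mode != MODE_GROUP)
                d0->mode = MODE_GROUP;
}

int main(void)
{
        struct descr a = { 0, 9, MODE_READ };
        struct descr b = { 8, 19, MODE_WRITE };

        if (descr_cmp(&a, &b) == 0)
                descr_merge(&a, &b);
        printf("[%lu, %lu] mode %d\n", a.start, a.end, (int)a.mode);
        /* prints "[0, 19] mode 1": one write lock covering both */
        return 0;
}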
 /*
  * Sort locks in lexicographical order of their (fid, start-offset) pairs.
  */
@@ -266,10 +285,11 @@ static void cl_io_locks_sort(struct cl_io *io)
                 done = 1;
                 prev = NULL;
 
-                list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
-                                         cill_linkage) {
+                cfs_list_for_each_entry_safe(curr, temp,
+                                             &io->ci_lockset.cls_todo,
+                                             cill_linkage) {
                         if (prev != NULL) {
-                                switch (cl_lock_descr_cmp(&prev->cill_descr,
+                                switch (cl_lock_descr_sort(&prev->cill_descr,
                                                           &curr->cill_descr)) {
                                 case 0:
                                         /*
@@ -280,8 +300,8 @@ static void cl_io_locks_sort(struct cl_io *io)
                                 default:
                                         LBUG();
                                 case +1:
-                                        list_move_tail(&curr->cill_linkage,
-                                                       &prev->cill_linkage);
+                                        cfs_list_move_tail(&curr->cill_linkage,
+                                                           &prev->cill_linkage);
                                         done = 0;
                                         continue; /* don't change prev: it's
                                                    * still "previous" */
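The loop this hunk renames is a bubble sort over the todo list: passes repeat until a full scan moves nothing, and case +1 moves curr in front of prev via cfs_list_move_tail(). The same control flow over an array, as a compact sketch (hypothetical helper, not the Lustre API):

#include <stdio.h>

struct key {
        unsigned long fid;      /* stand-in for the lock's object fid */
        unsigned long start;    /* extent start offset */
};

static int key_cmp(const struct key *a, const struct key *b)
{
        if (a->fid != b->fid)
                return a->fid < b->fid ? -1 : +1;
        if (a->start != b->start)
                return a->start < b->start ? -1 : +1;
        return 0;
}

static void locks_sort(struct key *k, int n)
{
        int done;

        do {
                int i;

                done = 1;
                for (i = 1; i < n; i++) {
                        if (key_cmp(&k[i - 1], &k[i]) > 0) {
                                /* like the case +1 branch: put the
                                 * smaller entry in front, rescan */
                                struct key tmp = k[i - 1];

                                k[i - 1] = k[i];
                                k[i]     = tmp;
                                done = 0;
                        }
                }
        } while (!done);
}

int main(void)
{
        struct key k[] = { { 2, 0 }, { 1, 4096 }, { 1, 0 } };
        int i;

        locks_sort(k, 3);
        for (i = 0; i < 3; i++)
                printf("(%lu, %lu)\n", k[i].fid, k[i].start);
        return 0;
}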
@@ -301,26 +321,50 @@ static void cl_io_locks_sort(struct cl_io *io)
  * \retval +ve there is a matching lock in the \a queue
  * \retval   0 there are no matching locks in the \a queue
  */
-int cl_queue_match(const struct list_head *queue,
+int cl_queue_match(const cfs_list_t *queue,
                    const struct cl_lock_descr *need)
 {
        struct cl_io_lock_link *scan;
 
        ENTRY;
-       list_for_each_entry(scan, queue, cill_linkage) {
+       cfs_list_for_each_entry(scan, queue, cill_linkage) {
                if (cl_lock_descr_match(&scan->cill_descr, need))
                        RETURN(+1);
        }
-       return 0;
+       RETURN(0);
 }
 EXPORT_SYMBOL(cl_queue_match);
 
+static int cl_queue_merge(const cfs_list_t *queue,
+                          const struct cl_lock_descr *need)
+{
+       struct cl_io_lock_link *scan;
+
+       ENTRY;
+       cfs_list_for_each_entry(scan, queue, cill_linkage) {
+               if (cl_lock_descr_cmp(&scan->cill_descr, need))
+                       continue;
+               cl_lock_descr_merge(&scan->cill_descr, need);
+               CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+                      scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
+                      scan->cill_descr.cld_end);
+               RETURN(+1);
+       }
+       RETURN(0);
+}
+
 static int cl_lockset_match(const struct cl_lockset *set,
-                            const struct cl_lock_descr *need, int all_queues)
+                            const struct cl_lock_descr *need)
 {
-        return (all_queues ? cl_queue_match(&set->cls_todo, need) : 0) ||
-                cl_queue_match(&set->cls_curr, need) ||
-                cl_queue_match(&set->cls_done, need);
+        return cl_queue_match(&set->cls_curr, need) ||
+               cl_queue_match(&set->cls_done, need);
+}
+
+static int cl_lockset_merge(const struct cl_lockset *set,
+                            const struct cl_lock_descr *need)
+{
+        return cl_queue_merge(&set->cls_todo, need) ||
+               cl_lockset_match(set, need);
 }
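Together these give the lockset a two-tier policy: a new descriptor is first folded into a lock still queued on cls_todo, where growing the extent is safe because nothing has been enqueued yet, and otherwise is checked against locks already being enqueued (cls_curr) or granted (cls_done), which can be matched but never expanded. A simplified sketch of that policy, assuming array-backed queues and plain extents instead of the real cfs_list queues and descriptors:

#include <stdio.h>

struct range {
        unsigned long start;
        unsigned long end;      /* inclusive, like cld_end */
};

/* cls_todo analogue: merging is safe, nothing was sent to servers yet */
static int queue_merge(struct range *todo, int n, const struct range *need)
{
        int i;

        for (i = 0; i < n; i++) {
                if (todo[i].end < need->start || todo[i].start > need->end)
                        continue;       /* disjoint, keep scanning */
                if (need->start < todo[i].start)
                        todo[i].start = need->start;
                if (need->end > todo[i].end)
                        todo[i].end = need->end;
                return 1;               /* folded into a pending lock */
        }
        return 0;
}

/* cls_curr/cls_done analogue: an existing lock must already cover it */
static int queue_match(const struct range *q, int n, const struct range *need)
{
        int i;

        for (i = 0; i < n; i++)
                if (q[i].start <= need->start && need->end <= q[i].end)
                        return 1;
        return 0;
}

int main(void)
{
        struct range todo[] = { { 0, 4095 } };
        struct range need   = { 4000, 8191 };
        int merged = queue_merge(todo, 1, &need);

        printf("merged: %d -> [%lu, %lu]\n", merged,
               todo[0].start, todo[0].end);
        printf("covered now: %d\n", queue_match(todo, 1, &need));
        /* prints "merged: 1 -> [0, 8191]" and "covered now: 1" */
        return 0;
}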
 
 static int cl_lockset_lock_one(const struct lu_env *env,
@@ -332,15 +377,15 @@ static int cl_lockset_lock_one(const struct lu_env *env,
 
         ENTRY;
 
-        lock = cl_lock_request(env, io, &link->cill_descr, link->cill_enq_flags,
-                               "io", io);
+        lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
         if (!IS_ERR(lock)) {
                 link->cill_lock = lock;
-                list_move(&link->cill_linkage, &set->cls_curr);
-                if (!(link->cill_enq_flags & CEF_ASYNC)) {
+                cfs_list_move(&link->cill_linkage, &set->cls_curr);
+                if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
                         result = cl_wait(env, lock);
                         if (result == 0)
-                                list_move(&link->cill_linkage, &set->cls_done);
+                                cfs_list_move(&link->cill_linkage,
+                                              &set->cls_done);
                 } else
                         result = 0;
         } else
@@ -354,7 +399,7 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
         struct cl_lock *lock = link->cill_lock;
 
         ENTRY;
-        list_del_init(&link->cill_linkage);
+        cfs_list_del_init(&link->cill_linkage);
         if (lock != NULL) {
                 cl_lock_release(env, lock, "io", io);
                 link->cill_lock = NULL;
@@ -374,8 +419,8 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
 
         ENTRY;
         result = 0;
-        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
-                if (!cl_lockset_match(set, &link->cill_descr, 0)) {
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+                if (!cl_lockset_match(set, &link->cill_descr)) {
                         /* XXX some locking to guarantee that locks aren't
                          * expanded in between. */
                         result = cl_lockset_lock_one(env, io, set, link);
@@ -385,12 +430,13 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
                         cl_lock_link_fini(env, io, link);
         }
         if (result == 0) {
-                list_for_each_entry_safe(link, temp,
-                                         &set->cls_curr, cill_linkage) {
+                cfs_list_for_each_entry_safe(link, temp,
+                                             &set->cls_curr, cill_linkage) {
                         lock = link->cill_lock;
                         result = cl_wait(env, lock);
                         if (result == 0)
-                                list_move(&link->cill_linkage, &set->cls_done);
+                                cfs_list_move(&link->cill_linkage,
+                                              &set->cls_done);
                         else
                                 break;
                 }
@@ -451,13 +497,13 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
         ENTRY;
         set = &io->ci_lockset;
 
-        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
                 cl_lock_link_fini(env, io, link);
 
-        list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
                 cl_lock_link_fini(env, io, link);
 
-        list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
                 cl_unuse(env, link->cill_lock);
                 cl_lock_link_fini(env, io, link);
         }
@@ -562,10 +608,10 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
         int result;
 
         ENTRY;
-        if (cl_lockset_match(&io->ci_lockset, &link->cill_descr, 1))
+        if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
                 result = +1;
         else {
-                list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
+                cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
                 result = 0;
         }
         RETURN(result);
@@ -582,7 +628,7 @@ static void cl_free_io_lock_link(const struct lu_env *env,
  * Allocates new lock link, and uses it to add a lock to a lockset.
  */
 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
-                         struct cl_lock_descr *descr, int enqflags)
+                         struct cl_lock_descr *descr)
 {
         struct cl_io_lock_link *link;
         int result;
@@ -591,7 +637,6 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
         OBD_ALLOC_PTR(link);
         if (link != NULL) {
                 link->cill_descr     = *descr;
-                link->cill_enq_flags = enqflags;
                 link->cill_fini      = cl_free_io_lock_link;
                 result = cl_io_lock_add(env, io, link);
                 if (result) /* lock match */
@@ -668,7 +713,7 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
  */
 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
 {
-        int     result;
+        int     result = 1;
         loff_t  start;
         loff_t  end;
         pgoff_t idx;
@@ -681,10 +726,13 @@ static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
                  * check that [start, end) and [pos, pos + count) extents
                  * overlap.
                  */
-                start = cl_offset(page->cp_obj, idx);
-                end   = cl_offset(page->cp_obj, idx + 1);
-                result = io->u.ci_rw.crw_pos < end &&
-                        start < io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
+                if (!cl_io_is_append(io)) {
+                        const struct cl_io_rw_common *crw = &(io->u.ci_rw);
+                        start = cl_offset(page->cp_obj, idx);
+                        end   = cl_offset(page->cp_obj, idx + 1);
+                        result = crw->crw_pos < end &&
+                                 start < crw->crw_pos + crw->crw_count;
+                }
                 break;
         case CIT_FAULT:
                 result = io->u.ci_fault.ft_index == idx;
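The new guard is what the writeback support needs: an append write has no usable [pos, pos + count) extent up front, so result keeps its initial value of 1 and every page of the object counts as part of the IO. For plain reads and writes the overlap test is unchanged; a small sketch with cl_offset() replaced by page-size arithmetic (an assumption for illustration):

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096ULL

/* Overlap test from cl_page_in_io() for non-append reads/writes:
 * does page idx intersect the [pos, pos + count) byte extent? */
static int page_in_rw(unsigned long long idx,
                      unsigned long long pos, unsigned long long count)
{
        unsigned long long start = idx * SKETCH_PAGE_SIZE;
        unsigned long long end   = (idx + 1) * SKETCH_PAGE_SIZE;

        return pos < end && start < pos + count;
}

int main(void)
{
        /* a 100-byte write at offset 4096 touches page 1, not page 0 */
        printf("%d %d\n", page_in_rw(0, 4096, 100), page_in_rw(1, 4096, 100));
        /* append IO skips this test: every page counts as in-IO */
        return 0;
}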
@@ -855,14 +903,14 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
         /*
          * If ->cio_submit() failed, no pages were sent.
          */
-        LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
+        LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
         RETURN(result);
 }
 EXPORT_SYMBOL(cl_io_submit_rw);
 
 /**
  * Submit a sync_io and wait for the IO to be finished, or error happens.
- * If @timeout is zero, it means to wait for the IO unconditionally.
+ * If \a timeout is zero, it means to wait for the IO unconditionally.
  */
 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                       enum cl_req_type iot, struct cl_2queue *queue,
@@ -897,7 +945,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                  rc = cl_sync_io_wait(env, io, &queue->c2_qout,
                                       anchor, timeout);
         } else {
-                LASSERT(list_empty(&queue->c2_qout.pl_pages));
+                LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
                 cl_page_list_for_each(pg, &queue->c2_qin)
                         pg->cp_sync_io = NULL;
         }
@@ -999,13 +1047,13 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
                      struct cl_object *obj,
                      const struct cl_io_operations *ops)
 {
-        struct list_head *linkage = &slice->cis_linkage;
+        cfs_list_t *linkage = &slice->cis_linkage;
 
         LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
-                list_empty(linkage));
+                cfs_list_empty(linkage));
         ENTRY;
 
-        list_add_tail(linkage, &io->ci_layers);
+        cfs_list_add_tail(linkage, &io->ci_layers);
         slice->cis_io  = io;
         slice->cis_obj = obj;
         slice->cis_iop = ops;
@@ -1038,11 +1086,11 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
         LASSERT(page->cp_owner != NULL);
         LINVRNT(plist->pl_owner == cfs_current());
 
-        lockdep_off();
-        mutex_lock(&page->cp_mutex);
-        lockdep_on();
-        LASSERT(list_empty(&page->cp_batch));
-        list_add_tail(&page->cp_batch, &plist->pl_pages);
+        cfs_lockdep_off();
+        cfs_mutex_lock(&page->cp_mutex);
+        cfs_lockdep_on();
+        LASSERT(cfs_list_empty(&page->cp_batch));
+        cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
         ++plist->pl_nr;
         page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
         cl_page_get(page);
@@ -1060,10 +1108,10 @@ void cl_page_list_del(const struct lu_env *env,
         LINVRNT(plist->pl_owner == cfs_current());
 
         ENTRY;
-        list_del_init(&page->cp_batch);
-        lockdep_off();
-        mutex_unlock(&page->cp_mutex);
-        lockdep_on();
+        cfs_list_del_init(&page->cp_batch);
+        cfs_lockdep_off();
+        cfs_mutex_unlock(&page->cp_mutex);
+        cfs_lockdep_on();
         --plist->pl_nr;
         lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
         cl_page_put(env, page);
@@ -1082,7 +1130,7 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
         LINVRNT(src->pl_owner == cfs_current());
 
         ENTRY;
-        list_move_tail(&page->cp_batch, &dst->pl_pages);
+        cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
         --src->pl_nr;
         ++dst->pl_nr;
         lu_ref_set_at(&page->cp_reference,
@@ -1127,10 +1175,10 @@ void cl_page_list_disown(const struct lu_env *env,
         cl_page_list_for_each_safe(page, temp, plist) {
                 LASSERT(plist->pl_nr > 0);
 
-                list_del_init(&page->cp_batch);
-                lockdep_off();
-                mutex_unlock(&page->cp_mutex);
-                lockdep_on();
+                cfs_list_del_init(&page->cp_batch);
+                cfs_lockdep_off();
+                cfs_mutex_unlock(&page->cp_mutex);
+                cfs_lockdep_on();
                 --plist->pl_nr;
                 /*
                  * cl_page_disown0 rather than usual cl_page_disown() is used,
@@ -1365,7 +1413,7 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
                       const struct cl_req_operations *ops)
 {
         ENTRY;
-        list_add_tail(&slice->crs_linkage, &req->crq_layers);
+        cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
         slice->crs_dev = dev;
         slice->crs_ops = ops;
         slice->crs_req = req;
@@ -1377,9 +1425,9 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
 {
         unsigned i;
 
-        LASSERT(list_empty(&req->crq_pages));
+        LASSERT(cfs_list_empty(&req->crq_pages));
         LASSERT(req->crq_nrpages == 0);
-        LINVRNT(list_empty(&req->crq_layers));
+        LINVRNT(cfs_list_empty(&req->crq_layers));
         LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
         ENTRY;
 
@@ -1410,7 +1458,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
         result = 0;
         page = cl_page_top(page);
         do {
-                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                         dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
                         if (dev->cd_ops->cdo_req_init != NULL) {
                                 result = dev->cd_ops->cdo_req_init(env,
@@ -1436,10 +1484,10 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
         /*
          * for the lack of list_for_each_entry_reverse_safe()...
          */
-        while (!list_empty(&req->crq_layers)) {
-                slice = list_entry(req->crq_layers.prev,
-                                   struct cl_req_slice, crs_linkage);
-                list_del_init(&slice->crs_linkage);
+        while (!cfs_list_empty(&req->crq_layers)) {
+                slice = cfs_list_entry(req->crq_layers.prev,
+                                       struct cl_req_slice, crs_linkage);
+                cfs_list_del_init(&slice->crs_linkage);
                 if (slice->crs_ops->cro_completion != NULL)
                         slice->crs_ops->cro_completion(env, slice, rc);
         }
@@ -1495,11 +1543,13 @@ void cl_req_page_add(const struct lu_env *env,
         ENTRY;
         page = cl_page_top(page);
 
-        LINVRNT(cl_page_is_vmlocked(env, page));
-        LASSERT(list_empty(&page->cp_flight));
+        LASSERT(cfs_list_empty(&page->cp_flight));
         LASSERT(page->cp_req == NULL);
 
-        list_add_tail(&page->cp_flight, &req->crq_pages);
+        CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
+                      req, req->crq_type, req->crq_nrpages);
+
+        cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
         ++req->crq_nrpages;
         page->cp_req = req;
         obj = cl_object_top(page->cp_obj);
@@ -1527,11 +1577,10 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
         ENTRY;
         page = cl_page_top(page);
 
-        LINVRNT(cl_page_is_vmlocked(env, page));
-        LASSERT(!list_empty(&page->cp_flight));
+        LASSERT(!cfs_list_empty(&page->cp_flight));
         LASSERT(req->crq_nrpages > 0);
 
-        list_del_init(&page->cp_flight);
+        cfs_list_del_init(&page->cp_flight);
         --req->crq_nrpages;
         page->cp_req = NULL;
         EXIT;
@@ -1557,7 +1606,7 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req)
                 LASSERT(req->crq_o[i].ro_obj != NULL);
 
         result = 0;
-        list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+        cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                 if (slice->crs_ops->cro_prep != NULL) {
                         result = slice->crs_ops->cro_prep(env, slice);
                         if (result != 0)
@@ -1580,14 +1629,14 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
         struct cl_page            *page;
         int i;
 
-        LASSERT(!list_empty(&req->crq_pages));
+        LASSERT(!cfs_list_empty(&req->crq_pages));
         ENTRY;
 
         /* Take any page to use as a model. */
-        page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
+        page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
 
         for (i = 0; i < req->crq_nrobjs; ++i) {
-                list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+                cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                         const struct cl_page_slice *scan;
                         const struct cl_object     *obj;
 
@@ -1619,7 +1668,7 @@ void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
 {
         ENTRY;
         cfs_waitq_init(&anchor->csi_waitq);
-        atomic_set(&anchor->csi_sync_nr, nrpages);
+        cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
         anchor->csi_sync_rc  = 0;
         EXIT;
 }
@@ -1641,30 +1690,23 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
         LASSERT(timeout >= 0);
 
         rc = l_wait_event(anchor->csi_waitq,
-                          atomic_read(&anchor->csi_sync_nr) == 0,
+                          cfs_atomic_read(&anchor->csi_sync_nr) == 0,
                           &lwi);
         if (rc < 0) {
-                int rc2;
-
                 CERROR("SYNC IO failed with error: %d, try to cancel "
-                       "the remaining page\n", rc);
-
-                rc2 = cl_io_cancel(env, io, queue);
-                if (rc2 < 0) {
-                        lwi = (struct l_wait_info) { 0 };
-                        /* Too bad, some pages are still in IO. */
-                        CERROR("Failed to cancel transfer error: %d, mostly "
-                               "because of they are still being transferred, "
-                               "waiting for %i pages\n",
-                               rc2, atomic_read(&anchor->csi_sync_nr));
-                        (void)l_wait_event(anchor->csi_waitq,
-                                     atomic_read(&anchor->csi_sync_nr) == 0,
-                                     &lwi);
-                }
+                       "%d remaining pages\n",
+                       rc, cfs_atomic_read(&anchor->csi_sync_nr));
+
+                (void)cl_io_cancel(env, io, queue);
+
+                lwi = (struct l_wait_info) { 0 };
+                (void)l_wait_event(anchor->csi_waitq,
+                                   cfs_atomic_read(&anchor->csi_sync_nr) == 0,
+                                   &lwi);
         } else {
                 rc = anchor->csi_sync_rc;
         }
-        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
+        LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
         cl_page_list_assume(env, io, queue);
         POISON(anchor, 0x5a, sizeof *anchor);
         RETURN(rc);
@@ -1684,7 +1726,8 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
          * ->{prepare,commit}_write(). Completion is used to signal the end of
          * IO.
          */
-        if (atomic_dec_and_test(&anchor->csi_sync_nr))
+        LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
+        if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
                 cfs_waitq_broadcast(&anchor->csi_waitq);
         EXIT;
 }
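The rework above drops the second error path in cl_sync_io_wait(): after a failed timed wait it now always cancels the queue and then waits indefinitely for the counter to drain, since pages already under RPC cannot be recalled. The counter protocol itself is unchanged: cl_sync_io_init() arms csi_sync_nr with the page count, each cl_sync_io_note() decrements it, and the final decrement wakes the waiter. A portable sketch of that protocol, with pthread primitives standing in for the cfs_* wait queue and atomics:

#include <pthread.h>
#include <stdio.h>

struct sync_io {
        pthread_mutex_t lock;
        pthread_cond_t  waitq;  /* stands in for csi_waitq */
        int             nr;     /* stands in for csi_sync_nr */
        int             rc;     /* stands in for csi_sync_rc */
};

static void sync_io_init(struct sync_io *s, int nrpages)
{
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->waitq, NULL);
        s->nr = nrpages;
        s->rc = 0;
}

/* called once per completed page, like cl_sync_io_note() */
static void sync_io_note(struct sync_io *s, int ioret)
{
        pthread_mutex_lock(&s->lock);
        if (ioret < 0 && s->rc == 0)
                s->rc = ioret;          /* keep the first error only */
        if (--s->nr == 0)
                pthread_cond_broadcast(&s->waitq);
        pthread_mutex_unlock(&s->lock);
}

/* like cl_sync_io_wait() without the timeout/cancel path */
static int sync_io_wait(struct sync_io *s)
{
        pthread_mutex_lock(&s->lock);
        while (s->nr > 0)
                pthread_cond_wait(&s->waitq, &s->lock);
        pthread_mutex_unlock(&s->lock);
        return s->rc;
}

int main(void)
{
        struct sync_io s;

        sync_io_init(&s, 2);
        sync_io_note(&s, 0);
        sync_io_note(&s, 0);
        printf("rc = %d\n", sync_io_wait(&s));
        return 0;
}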