LU-5710 all: second batch of corrected typos and grammar errors
[fs/lustre-release.git] / lustre/osc/osc_page.c
index a673b91..72285ba 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -82,15 +82,15 @@ static int osc_page_is_dlocked(const struct lu_env *env,
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);
 
-        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
-        if (pending)
-                flags |= LDLM_FL_CBPENDING;
+       flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
+       if (pending)
+               flags |= LDLM_FL_CBPENDING;
 
-        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
-        osc_lock_build_res(env, obj, resname);
-        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
-        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
-                              dlmmode, &flags, NULL, lockh, unref);
+       dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
+       osc_lock_build_res(env, obj, resname);
+       osc_index2policy(policy, page->cp_obj, osc_index(opg), osc_index(opg));
+       return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
+                             dlmmode, &flags, NULL, lockh, unref);
 }
 
 /**
@@ -125,10 +125,10 @@ static int osc_page_protected(const struct lu_env *env,
                 hdr = cl_object_header(opg->ops_cl.cpl_obj);
                 descr = &osc_env_info(env)->oti_descr;
                 descr->cld_mode = mode;
-                descr->cld_start = page->cp_index;
-                descr->cld_end   = page->cp_index;
+               descr->cld_start = osc_index(opg);
+               descr->cld_end   = osc_index(opg);
                spin_lock(&hdr->coh_lock_guard);
-                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+               list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                         /*
                          * Lock-less sub-lock has to be either in HELD state
                          * (when io is actively going on), or in CACHED state,
@@ -163,34 +163,26 @@ static int osc_page_protected(const struct lu_env *env,
  * Page operations.
  *
  */
-static void osc_page_fini(const struct lu_env *env,
-                          struct cl_page_slice *slice)
-{
-        struct osc_page *opg = cl2osc_page(slice);
-        CDEBUG(D_TRACE, "%p\n", opg);
-        LASSERT(opg->ops_lock == NULL);
-}
-
 static void osc_page_transfer_get(struct osc_page *opg, const char *label)
 {
-        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+       struct cl_page *page = opg->ops_cl.cpl_page;
 
-        LASSERT(!opg->ops_transfer_pinned);
-        cl_page_get(page);
-        lu_ref_add_atomic(&page->cp_reference, label, page);
-        opg->ops_transfer_pinned = 1;
+       LASSERT(!opg->ops_transfer_pinned);
+       cl_page_get(page);
+       lu_ref_add_atomic(&page->cp_reference, label, page);
+       opg->ops_transfer_pinned = 1;
 }
 
 static void osc_page_transfer_put(const struct lu_env *env,
-                                  struct osc_page *opg)
+                                 struct osc_page *opg)
 {
-        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+       struct cl_page *page = opg->ops_cl.cpl_page;
 
-        if (opg->ops_transfer_pinned) {
-                lu_ref_del(&page->cp_reference, "transfer", page);
-                opg->ops_transfer_pinned = 0;
-                cl_page_put(env, page);
-        }
+       if (opg->ops_transfer_pinned) {
+               opg->ops_transfer_pinned = 0;
+               lu_ref_del(&page->cp_reference, "transfer", page);
+               cl_page_put(env, page);
+       }
 }
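
[Note: the get/put pair above follows a simple pin-flag pattern: ops_transfer_pinned records whether this layer holds its own reference on the cl_page, which makes osc_page_transfer_put() safe to call unconditionally. A minimal standalone model of that pattern — plain C with illustrative types, not the Lustre API:]

#include <stdbool.h>

/* pin-flag pattern: the flag says whether we hold our own reference,
 * so the put side is idempotent and safe to call unconditionally */
struct page_ref {
	int  refcount;		/* stand-in for the cl_page refcount */
	bool transfer_pinned;	/* stand-in for ops_transfer_pinned */
};

static void transfer_get(struct page_ref *p)
{
	/* caller guarantees not already pinned; mirrors the LASSERT */
	p->refcount++;
	p->transfer_pinned = true;
}

static void transfer_put(struct page_ref *p)
{
	if (p->transfer_pinned) {
		p->transfer_pinned = false;
		p->refcount--;	/* last reference would free the page */
	}
}
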
 
 /**
@@ -208,16 +200,14 @@ static void osc_page_transfer_add(const struct lu_env *env,
        osc_lru_use(osc_cli(obj), opg);
 
        spin_lock(&obj->oo_seatbelt);
-       cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+       list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = current;
        spin_unlock(&obj->oo_seatbelt);
 }
 
-static int osc_page_cache_add(const struct lu_env *env,
-                             const struct cl_page_slice *slice,
-                             struct cl_io *io)
+int osc_page_cache_add(const struct lu_env *env,
+                       const struct cl_page_slice *slice, struct cl_io *io)
 {
-       struct osc_io   *oio = osc_env_io(env);
        struct osc_page *opg = cl2osc_page(slice);
        int result;
        ENTRY;
@@ -231,16 +221,6 @@ static int osc_page_cache_add(const struct lu_env *env,
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
 
-       /* for sync write, kernel will wait for this page to be flushed before
-        * osc_io_end() is called, so release it earlier.
-        * for mkwrite(), it's known there is no further pages. */
-       if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
-               if (oio->oi_active != NULL) {
-                       osc_extent_release(env, oio->oi_active);
-                       oio->oi_active = NULL;
-               }
-       }
-
        RETURN(result);
 }
 
@@ -252,101 +232,9 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
         policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
 }
 
-static int osc_page_addref_lock(const struct lu_env *env,
-                                struct osc_page *opg,
-                                struct cl_lock *lock)
-{
-        struct osc_lock *olock;
-        int              rc;
-
-        LASSERT(opg->ops_lock == NULL);
-
-        olock = osc_lock_at(lock);
-        if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
-                cfs_atomic_dec(&olock->ols_pageref);
-                rc = -ENODATA;
-        } else {
-               cl_lock_get(lock);
-                opg->ops_lock = lock;
-                rc = 0;
-        }
-        return rc;
-}
-
-static void osc_page_putref_lock(const struct lu_env *env,
-                                 struct osc_page *opg)
+static const char *osc_list(struct list_head *head)
 {
-        struct cl_lock  *lock = opg->ops_lock;
-        struct osc_lock *olock;
-
-        LASSERT(lock != NULL);
-        olock = osc_lock_at(lock);
-
-        cfs_atomic_dec(&olock->ols_pageref);
-        opg->ops_lock = NULL;
-
-        cl_lock_put(env, lock);
-}
-
-static int osc_page_is_under_lock(const struct lu_env *env,
-                                  const struct cl_page_slice *slice,
-                                  struct cl_io *unused)
-{
-        struct cl_lock *lock;
-        int             result = -ENODATA;
-
-        ENTRY;
-        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
-                               NULL, 1, 0);
-        if (lock != NULL) {
-               if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
-                       result = -EBUSY;
-               cl_lock_put(env, lock);
-       }
-        RETURN(result);
-}
-
-static void osc_page_disown(const struct lu_env *env,
-                            const struct cl_page_slice *slice,
-                            struct cl_io *io)
-{
-        struct osc_page *opg = cl2osc_page(slice);
-
-        if (unlikely(opg->ops_lock))
-                osc_page_putref_lock(env, opg);
-}
-
-static void osc_page_completion_read(const struct lu_env *env,
-                                     const struct cl_page_slice *slice,
-                                     int ioret)
-{
-       struct osc_page   *opg = cl2osc_page(slice);
-
-       if (likely(opg->ops_lock))
-               osc_page_putref_lock(env, opg);
-}
-
-static void osc_page_completion_write(const struct lu_env *env,
-                                     const struct cl_page_slice *slice,
-                                     int ioret)
-{
-}
-
-static int osc_page_fail(const struct lu_env *env,
-                         const struct cl_page_slice *slice,
-                         struct cl_io *unused)
-{
-        /*
-         * Cached read?
-         */
-        LBUG();
-        return 0;
-}
-
-
-static const char *osc_list(cfs_list_t *head)
-{
-        return cfs_list_empty(head) ? "-" : "+";
+       return list_empty(head) ? "-" : "+";
 }
 
 static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
@@ -366,13 +254,13 @@ static int osc_page_print(const struct lu_env *env,
         struct osc_object     *obj = cl2osc(slice->cpl_obj);
         struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
 
-        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
+       return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
                          "1< %#x %d %u %s %s > "
-                         "2< "LPU64" %u %u %#x %#x | %p %p %p > "
+                         "2< "LPD64" %u %u %#x %#x | %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s | %d %s %s>\n",
-                          opg,
+                         opg, osc_index(opg),
                           /* 1 */
                           oap->oap_magic, oap->oap_cmd,
                           oap->oap_interrupted,
@@ -400,9 +288,9 @@ static int osc_page_print(const struct lu_env *env,
                          osc_list(&obj->oo_hp_ready_item),
                          osc_list(&obj->oo_write_item),
                          osc_list(&obj->oo_read_item),
-                         cfs_atomic_read(&obj->oo_nr_reads),
+                         atomic_read(&obj->oo_nr_reads),
                          osc_list(&obj->oo_reading_exts),
-                         cfs_atomic_read(&obj->oo_nr_writes),
+                         atomic_read(&obj->oo_nr_writes),
                          osc_list(&obj->oo_hp_exts),
                          osc_list(&obj->oo_urgent_exts));
 }
@@ -420,16 +308,16 @@ static void osc_page_delete(const struct lu_env *env,
         CDEBUG(D_TRACE, "%p\n", opg);
         osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(env, obj, opg);
-        if (rc) {
-                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
-                              "Trying to teardown failed: %d\n", rc);
-                LASSERT(0);
-        }
+       if (rc) {
+               CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
+                             "Trying to teardown failed: %d\n", rc);
+               LASSERT(0);
+       }
 
        spin_lock(&obj->oo_seatbelt);
        if (opg->ops_submitter != NULL) {
-               LASSERT(!cfs_list_empty(&opg->ops_inflight));
-               cfs_list_del_init(&opg->ops_inflight);
+               LASSERT(!list_empty(&opg->ops_inflight));
+               list_del_init(&opg->ops_inflight);
                opg->ops_submitter = NULL;
        }
        spin_unlock(&obj->oo_seatbelt);
@@ -451,8 +339,9 @@ static void osc_page_delete(const struct lu_env *env,
        EXIT;
 }
 
-void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
-                   int from, int to)
+static void osc_page_clip(const struct lu_env *env,
+                         const struct cl_page_slice *slice,
+                         int from, int to)
 {
         struct osc_page       *opg = cl2osc_page(slice);
         struct osc_async_page *oap = &opg->ops_oap;
@@ -495,28 +384,15 @@ static int osc_page_flush(const struct lu_env *env,
 }
 
 static const struct cl_page_operations osc_page_ops = {
-        .cpo_fini          = osc_page_fini,
-        .cpo_print         = osc_page_print,
-        .cpo_delete        = osc_page_delete,
-        .cpo_is_under_lock = osc_page_is_under_lock,
-        .cpo_disown        = osc_page_disown,
-        .io = {
-                [CRT_READ] = {
-                        .cpo_cache_add  = osc_page_fail,
-                        .cpo_completion = osc_page_completion_read
-                },
-                [CRT_WRITE] = {
-                       .cpo_cache_add  = osc_page_cache_add,
-                       .cpo_completion = osc_page_completion_write
-               }
-       },
+       .cpo_print         = osc_page_print,
+       .cpo_delete        = osc_page_delete,
        .cpo_clip           = osc_page_clip,
        .cpo_cancel         = osc_page_cancel,
        .cpo_flush          = osc_page_flush
 };
 
 int osc_page_init(const struct lu_env *env, struct cl_object *obj,
-                 struct cl_page *page, struct page *vmpage)
+                 struct cl_page *page, pgoff_t index)
 {
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg = cl_object_page_slice(obj, page);
@@ -525,13 +401,13 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
        opg->ops_from = 0;
        opg->ops_to   = PAGE_CACHE_SIZE;
 
-       result = osc_prep_async_page(osc, opg, vmpage,
-                                       cl_offset(obj, page->cp_index));
+       result = osc_prep_async_page(osc, opg, page->cp_vmpage,
+                                    cl_offset(obj, index));
        if (result == 0) {
                struct osc_io *oio = osc_env_io(env);
                opg->ops_srvlock = osc_io_srvlock(oio);
-               cl_page_slice_add(page, &opg->ops_cl, obj,
-                               &osc_page_ops);
+               cl_page_slice_add(page, &opg->ops_cl, obj, index,
+                                 &osc_page_ops);
        }
        /*
         * Cannot assert osc_page_protected() here as read-ahead
@@ -542,16 +418,15 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
 #endif
        /* ops_inflight and ops_lru are the same field, but it doesn't
         * hurt to initialize it twice :-) */
-       CFS_INIT_LIST_HEAD(&opg->ops_inflight);
-       CFS_INIT_LIST_HEAD(&opg->ops_lru);
+       INIT_LIST_HEAD(&opg->ops_inflight);
+       INIT_LIST_HEAD(&opg->ops_lru);
 
        /* reserve an LRU space for this page */
        if (page->cp_type == CPT_CACHEABLE && result == 0) {
                result = osc_lru_reserve(env, osc, opg);
                if (result == 0) {
                        spin_lock(&osc->oo_tree_lock);
-                       result = radix_tree_insert(&osc->oo_tree,
-                                                  page->cp_index, opg);
+                       result = radix_tree_insert(&osc->oo_tree, index, opg);
                        if (result == 0)
                                ++osc->oo_npages;
                        spin_unlock(&osc->oo_tree_lock);
@@ -562,30 +437,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
        return result;
 }
 
-int osc_over_unstable_soft_limit(struct client_obd *cli)
-{
-       long obd_upages, obd_dpages, osc_upages;
-
-       /* Can't check cli->cl_unstable_count, therefore, no soft limit */
-       if (cli == NULL)
-               return 0;
-
-       obd_upages = cfs_atomic_read(&obd_unstable_pages);
-       obd_dpages = cfs_atomic_read(&obd_dirty_pages);
-
-       osc_upages = cfs_atomic_read(&cli->cl_unstable_count);
-
-       /* obd_max_dirty_pages is the max number of (dirty + unstable)
-        * pages allowed at any given time. To simulate an unstable page
-        * only limit, we subtract the current number of dirty pages
-        * from this max. This difference is roughly the amount of pages
-        * currently available for unstable pages. Thus, the soft limit
-        * is half of that difference. Check osc_upages to ensure we don't
-        * set SOFT_SYNC for OSCs without any outstanding unstable pages. */
-       return osc_upages != 0 &&
-              obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
-}
-
 /**
  * Helper function called by osc_io_submit() for every page in an immediate
  * transfer (i.e., transferred synchronously).
@@ -609,9 +460,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
        oap->oap_count     = opg->ops_to - opg->ops_from;
        oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
 
-       if (osc_over_unstable_soft_limit(oap->oap_cli))
-               oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
-
        if (!client_is_remote(osc_export(obj)) &&
                        cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
@@ -649,20 +497,24 @@ static const int lru_shrink_max = 8 << (20 - PAGE_CACHE_SHIFT); /* 8M */
 static int osc_cache_too_much(struct client_obd *cli)
 {
        struct cl_client_cache *cache = cli->cl_cache;
-       int pages = cfs_atomic_read(&cli->cl_lru_in_list);
+       long pages = atomic_long_read(&cli->cl_lru_in_list);
        unsigned long budget;
 
-       budget = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
+       LASSERT(cache != NULL);
+       budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
 
        /* if it's going to run out of LRU slots, we should free some, but
         * not too much, to maintain fairness among OSCs. */
-       if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+       if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
                if (pages >= budget)
                        return lru_shrink_max;
                else if (pages >= budget / 2)
                        return lru_shrink_min;
+#if 0
        } else if (pages >= budget * 2)
                return lru_shrink_min;
+#endif
+       }
        return 0;
 }
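
[Note: for concreteness, a standalone model of the shrink policy above, with plain longs in place of the kernel atomics and illustrative constants — lru_shrink_max is 8 MiB of 4 KiB pages (2048); lru_shrink_min is assumed to be the 2 MiB analogue (512); the cache sizes in main() are made up:]

#include <stdio.h>

static long shrink_target(long lru_max, int users, long in_list, long left)
{
	const long shrink_min = 512;	/* assumed 2M of 4K pages */
	const long shrink_max = 2048;	/* 8M, as in lru_shrink_max */
	long budget = lru_max / (users - 2);

	if (left < lru_max >> 4) {	/* less than 1/16 of slots free */
		if (in_list >= budget)
			return shrink_max;
		else if (in_list >= budget / 2)
			return shrink_min;
	}
	return 0;
}

int main(void)
{
	/* 4 OSCs sharing 262144 slots (users == 6): budget 65536 each;
	 * 70000 pages on this LRU and only 10000 slots free -> 2048 */
	printf("shrink %ld pages\n", shrink_target(262144, 6, 70000, 10000));
	return 0;
}
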
 
@@ -678,29 +530,29 @@ int lru_queue_work(const struct lu_env *env, void *data)
        RETURN(0);
 }
 
-void osc_lru_add_batch(struct client_obd *cli, cfs_list_t *plist)
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
 {
-       CFS_LIST_HEAD(lru);
+       struct list_head lru = LIST_HEAD_INIT(lru);
        struct osc_async_page *oap;
-       int npages = 0;
+       long npages = 0;
 
-       cfs_list_for_each_entry(oap, plist, oap_pending_item) {
+       list_for_each_entry(oap, plist, oap_pending_item) {
                struct osc_page *opg = oap2osc_page(oap);
 
                if (!opg->ops_in_lru)
                        continue;
 
                ++npages;
-               LASSERT(cfs_list_empty(&opg->ops_lru));
-               cfs_list_add(&opg->ops_lru, &lru);
+               LASSERT(list_empty(&opg->ops_lru));
+               list_add(&opg->ops_lru, &lru);
        }
 
        if (npages > 0) {
-               client_obd_list_lock(&cli->cl_lru_list_lock);
-               cfs_list_splice_tail(&lru, &cli->cl_lru_list);
-               cfs_atomic_sub(npages, &cli->cl_lru_busy);
-               cfs_atomic_add(npages, &cli->cl_lru_in_list);
-               client_obd_list_unlock(&cli->cl_lru_list_lock);
+               spin_lock(&cli->cl_lru_list_lock);
+               list_splice_tail(&lru, &cli->cl_lru_list);
+               atomic_long_sub(npages, &cli->cl_lru_busy);
+               atomic_long_add(npages, &cli->cl_lru_in_list);
+               spin_unlock(&cli->cl_lru_list_lock);
 
                /* XXX: May set force to be true for better performance */
                if (osc_cache_too_much(cli))
@@ -710,9 +562,9 @@ void osc_lru_add_batch(struct client_obd *cli, cfs_list_t *plist)
 
 static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
 {
-       LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) > 0);
-       cfs_list_del_init(&opg->ops_lru);
-       cfs_atomic_dec(&cli->cl_lru_in_list);
+       LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
+       list_del_init(&opg->ops_lru);
+       atomic_long_dec(&cli->cl_lru_in_list);
 }
 
 /**
@@ -722,16 +574,16 @@ static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
 static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
 {
        if (opg->ops_in_lru) {
-               client_obd_list_lock(&cli->cl_lru_list_lock);
-               if (!cfs_list_empty(&opg->ops_lru)) {
+               spin_lock(&cli->cl_lru_list_lock);
+               if (!list_empty(&opg->ops_lru)) {
                        __osc_lru_del(cli, opg);
                } else {
-                       LASSERT(cfs_atomic_read(&cli->cl_lru_busy) > 0);
-                       cfs_atomic_dec(&cli->cl_lru_busy);
+                       LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
+                       atomic_long_dec(&cli->cl_lru_busy);
                }
-               client_obd_list_unlock(&cli->cl_lru_list_lock);
+               spin_unlock(&cli->cl_lru_list_lock);
 
-               cfs_atomic_inc(cli->cl_lru_left);
+               atomic_long_inc(cli->cl_lru_left);
                /* this is a great place to release more LRU pages if
                 * this osc occupies too many LRU pages and the kernel is
                 * stealing one of them. */
@@ -739,7 +591,7 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
                        (void)ptlrpcd_queue_work(cli->cl_lru_work);
                wake_up(&osc_lru_waitq);
        } else {
-               LASSERT(cfs_list_empty(&opg->ops_lru));
+               LASSERT(list_empty(&opg->ops_lru));
        }
 }
 
@@ -748,13 +600,13 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
  */
 static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
 {
-       /* If page is being transfered for the first time,
+       /* If page is being transferred for the first time,
         * ops_lru should be empty */
-       if (opg->ops_in_lru && !cfs_list_empty(&opg->ops_lru)) {
-               client_obd_list_lock(&cli->cl_lru_list_lock);
+       if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
+               spin_lock(&cli->cl_lru_list_lock);
                __osc_lru_del(cli, opg);
-               client_obd_list_unlock(&cli->cl_lru_list_lock);
-               cfs_atomic_inc(&cli->cl_lru_busy);
+               spin_unlock(&cli->cl_lru_list_lock);
+               atomic_long_inc(&cli->cl_lru_busy);
        }
 }
 
@@ -776,54 +628,76 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
 }
 
 /**
+ * Check if a cl_page can be released, i.e., it's not being used.
+ *
+ * If unstable accounting is turned on, a bulk transfer may hold one
+ * refcount for recovery, so we need to check the vmpage refcount as well;
+ * otherwise, even though we could destroy the cl_page, the corresponding
+ * vmpage could not be reused.
+ */
+static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
+{
+       if (cl_page_in_use_noref(page))
+               return true;
+
+       if (cli->cl_cache->ccc_unstable_check) {
+               struct page *vmpage = cl_page_vmpage(page);
+
+               /* a vmpage has two known users: the cl_page and the
+                * VM page cache */
+               if (page_count(vmpage) - page_mapcount(vmpage) > 2)
+                       return true;
+       }
+       return false;
+}
+
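
[Note: a sketch of the vmpage test above with plain ints standing in for page_count() and page_mapcount(): an idle cl_page-backed vmpage is held by exactly two known users (the cl_page and the page cache), so any extra non-mapping reference means someone, such as a bulk transfer, still pins the page:]

#include <stdbool.h>

/* model of the check in lru_page_busy(): refs beyond the cl_page and
 * the page-cache reference (mappings excluded) mean "busy" */
static bool vmpage_busy(int page_count, int page_mapcount)
{
	return page_count - page_mapcount > 2;
}
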
+/**
  * Drop at most @target pages from the LRU.
  */
-int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
-                  int target, bool force)
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+                  long target, bool force)
 {
        struct cl_io *io;
        struct cl_object *clobj = NULL;
        struct cl_page **pvec;
        struct osc_page *opg;
+       long count = 0;
        int maxscan = 0;
-       int count = 0;
        int index = 0;
        int rc = 0;
        ENTRY;
 
-       LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) >= 0);
-       if (cfs_atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+       LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
+       if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
                RETURN(0);
 
        if (!force) {
-               if (cfs_atomic_read(&cli->cl_lru_shrinkers) > 0)
+               if (atomic_read(&cli->cl_lru_shrinkers) > 0)
                        RETURN(-EBUSY);
 
-               if (cfs_atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
-                       cfs_atomic_dec(&cli->cl_lru_shrinkers);
+               if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+                       atomic_dec(&cli->cl_lru_shrinkers);
                        RETURN(-EBUSY);
                }
        } else {
-               cfs_atomic_inc(&cli->cl_lru_shrinkers);
+               atomic_inc(&cli->cl_lru_shrinkers);
        }
 
        pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
        io = &osc_env_info(env)->oti_io;
 
-       client_obd_list_lock(&cli->cl_lru_list_lock);
-       maxscan = min(target << 1, cfs_atomic_read(&cli->cl_lru_in_list));
-       while (!cfs_list_empty(&cli->cl_lru_list)) {
+       spin_lock(&cli->cl_lru_list_lock);
+       maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
+       while (!list_empty(&cli->cl_lru_list)) {
                struct cl_page *page;
                bool will_free = false;
 
                if (--maxscan < 0)
                        break;
 
-               opg = cfs_list_entry(cli->cl_lru_list.next, struct osc_page,
-                                    ops_lru);
-               page = cl_page_top(opg->ops_cl.cpl_page);
-               if (cl_page_in_use_noref(page)) {
-                       cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+               opg = list_entry(cli->cl_lru_list.next, struct osc_page,
+                                ops_lru);
+               page = opg->ops_cl.cpl_page;
+               if (lru_page_busy(cli, page)) {
+                       list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                        continue;
                }
 
@@ -832,7 +706,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                        struct cl_object *tmp = page->cp_obj;
 
                        cl_object_get(tmp);
-                       client_obd_list_unlock(&cli->cl_lru_list_lock);
+                       spin_unlock(&cli->cl_lru_list_lock);
 
                        if (clobj != NULL) {
                                discard_pagevec(env, io, pvec, index);
@@ -848,7 +722,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                        io->ci_ignore_layout = 1;
                        rc = cl_io_init(env, io, CIT_MISC, clobj);
 
-                       client_obd_list_lock(&cli->cl_lru_list_lock);
+                       spin_lock(&cli->cl_lru_list_lock);
 
                        if (rc != 0)
                                break;
@@ -858,7 +732,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                }
 
                if (cl_page_own_try(env, io, page) == 0) {
-                       if (!cl_page_in_use_noref(page)) {
+                       if (!lru_page_busy(cli, page)) {
                                /* remove it from lru list earlier to avoid
                                 * lock contention */
                                __osc_lru_del(cli, opg);
@@ -872,24 +746,24 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                }
 
                if (!will_free) {
-                       cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+                       list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                        continue;
                }
 
                /* Don't discard or free the page while cl_lru_list_lock is held */
                pvec[index++] = page;
                if (unlikely(index == OTI_PVEC_SIZE)) {
-                       client_obd_list_unlock(&cli->cl_lru_list_lock);
+                       spin_unlock(&cli->cl_lru_list_lock);
                        discard_pagevec(env, io, pvec, index);
                        index = 0;
 
-                       client_obd_list_lock(&cli->cl_lru_list_lock);
+                       spin_lock(&cli->cl_lru_list_lock);
                }
 
                if (++count >= target)
                        break;
        }
-       client_obd_list_unlock(&cli->cl_lru_list_lock);
+       spin_unlock(&cli->cl_lru_list_lock);
 
        if (clobj != NULL) {
                discard_pagevec(env, io, pvec, index);
@@ -898,30 +772,25 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                cl_object_put(env, clobj);
        }
 
-       cfs_atomic_dec(&cli->cl_lru_shrinkers);
+       atomic_dec(&cli->cl_lru_shrinkers);
        if (count > 0) {
-               cfs_atomic_add(count, cli->cl_lru_left);
+               atomic_long_add(count, cli->cl_lru_left);
                wake_up_all(&osc_lru_waitq);
        }
        RETURN(count > 0 ? count : rc);
 }
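
[Note: osc_lru_shrink() above uses a common lock/batch/unlock shape: victims are staged into a fixed on-stack vector while cl_lru_list_lock is held, and the expensive discard runs only after the lock is dropped. A generic userspace sketch of that shape — pthread mutex and integers standing in for the lock and the pages; PVEC_SIZE is illustrative:]

#include <pthread.h>
#include <stdio.h>

#define PVEC_SIZE 16

/* expensive per-victim work; must not run under the list lock */
static void discard(const int *pvec, int n)
{
	for (int i = 0; i < n; i++)
		printf("discard %d\n", pvec[i]);
}

static long shrink(pthread_mutex_t *lock, int *nr_items, long target)
{
	int pvec[PVEC_SIZE];
	int index = 0;
	long count = 0;

	pthread_mutex_lock(lock);
	while (*nr_items > 0 && count < target) {
		pvec[index++] = --(*nr_items);	/* stand-in for a page */
		++count;
		if (index == PVEC_SIZE) {	/* flush batch outside lock */
			pthread_mutex_unlock(lock);
			discard(pvec, index);
			index = 0;
			pthread_mutex_lock(lock);
		}
	}
	pthread_mutex_unlock(lock);
	discard(pvec, index);		/* drain the final partial batch */
	return count;
}
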
 
-static inline int max_to_shrink(struct client_obd *cli)
-{
-       return min(cfs_atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
-}
-
-int osc_lru_reclaim(struct client_obd *cli)
+long osc_lru_reclaim(struct client_obd *cli)
 {
        struct cl_env_nest nest;
        struct lu_env *env;
        struct cl_client_cache *cache = cli->cl_cache;
+       long rc = 0;
        int max_scans;
-       int rc = 0;
        ENTRY;
 
        LASSERT(cache != NULL);
-       LASSERT(!cfs_list_empty(&cache->ccc_lru));
+       LASSERT(!list_empty(&cache->ccc_lru));
 
        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
@@ -932,33 +801,33 @@ int osc_lru_reclaim(struct client_obd *cli)
                if (rc == -EBUSY)
                        rc = 0;
 
-               CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+               CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
                        cli->cl_import->imp_obd->obd_name, rc, cli);
                GOTO(out, rc);
        }
 
-       CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+       CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
                cli->cl_import->imp_obd->obd_name, cli,
-               cfs_atomic_read(&cli->cl_lru_in_list),
-               cfs_atomic_read(&cli->cl_lru_busy));
+               atomic_long_read(&cli->cl_lru_in_list),
+               atomic_long_read(&cli->cl_lru_busy));
 
        /* Reclaim LRU slots from other client_obds as this one can't free
         * enough from its own. This should rarely happen. */
        spin_lock(&cache->ccc_lru_lock);
        cache->ccc_lru_shrinkers++;
-       cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+       list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
 
-       max_scans = cfs_atomic_read(&cache->ccc_users);
-       while (--max_scans > 0 && !cfs_list_empty(&cache->ccc_lru)) {
-               cli = cfs_list_entry(cache->ccc_lru.next, struct client_obd,
-                                       cl_lru_osc);
+       max_scans = atomic_read(&cache->ccc_users) - 2;
+       while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
+               cli = list_entry(cache->ccc_lru.next, struct client_obd,
+                                cl_lru_osc);
 
-               CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+               CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
                        cli->cl_import->imp_obd->obd_name, cli,
-                       cfs_atomic_read(&cli->cl_lru_in_list),
-                       cfs_atomic_read(&cli->cl_lru_busy));
+                       atomic_long_read(&cli->cl_lru_in_list),
+                       atomic_long_read(&cli->cl_lru_busy));
 
-               cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+               list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
                if (osc_cache_too_much(cli) > 0) {
                        spin_unlock(&cache->ccc_lru_lock);
 
@@ -973,11 +842,18 @@ int osc_lru_reclaim(struct client_obd *cli)
 
 out:
        cl_env_nested_put(&nest, env);
-       CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
+       CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
                cli->cl_import->imp_obd->obd_name, cli, rc);
        return rc;
 }
 
+/**
+ * osc_lru_reserve() is called to reserve an LRU slot for a cl_page.
+ *
+ * Usually the LRU slots are reserved in osc_io_iter_rw_init(), which
+ * should have reserved enough slots for the whole IO; only when the LRU
+ * slots are in extreme shortage does this function itself have to
+ * reclaim or wait for a free slot.
+ */
 static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                           struct osc_page *opg)
 {
@@ -995,8 +871,8 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                goto out;
        }
 
-       LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
-       while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+       LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
+       while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
 
                /* run out of LRU spaces, try to drop some by itself */
                rc = osc_lru_reclaim(cli);
@@ -1007,7 +883,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
 
                cond_resched();
                rc = l_wait_event(osc_lru_waitq,
-                               cfs_atomic_read(cli->cl_lru_left) > 0,
+                               atomic_long_read(cli->cl_lru_left) > 0,
                                &lwi);
                if (rc < 0)
                        break;
@@ -1015,7 +891,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
 
 out:
        if (rc >= 0) {
-               cfs_atomic_inc(&cli->cl_lru_busy);
+               atomic_long_inc(&cli->cl_lru_busy);
                opg->ops_in_lru = 1;
                rc = 0;
        }
@@ -1023,4 +899,147 @@ out:
        RETURN(rc);
 }
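
[Note: the heart of the reservation loop above is atomic_long_add_unless(cli->cl_lru_left, -1, 0), i.e. take a slot only if one is free. A C11 model of that primitive — illustrative; the kernel helper, not this sketch, is what the code uses:]

#include <stdatomic.h>
#include <stdbool.h>

/* model of atomic_long_add_unless(v, -1, 0): decrement unless zero */
static bool take_slot(atomic_long *left)
{
	long v = atomic_load(left);

	while (v > 0) {
		if (atomic_compare_exchange_weak(left, &v, v - 1))
			return true;	/* slot taken */
		/* the failed CAS reloaded v; retry */
	}
	return false;			/* no free slots */
}

[On failure the caller falls back in order: run osc_lru_reclaim() itself, then sleep on osc_lru_waitq until cl_lru_left becomes positive.]
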
 
+/**
+ * Atomic operations are expensive. We accumulate the accounting for the
+ * same page zone to get better performance.
+ * In practice this works pretty well because the pages in the same RPC
+ * are likely to come from the same page zone.
+ */
+static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+                                           int factor)
+{
+       int page_count = desc->bd_iov_count;
+       void *zone = NULL;
+       int count = 0;
+       int i;
+
+       for (i = 0; i < page_count; i++) {
+               void *pz = page_zone(desc->bd_iov[i].kiov_page);
+
+               if (likely(pz == zone)) {
+                       ++count;
+                       continue;
+               }
+
+               if (count > 0) {
+                       mod_zone_page_state(zone, NR_UNSTABLE_NFS,
+                                           factor * count);
+                       count = 0;
+               }
+               zone = pz;
+               ++count;
+       }
+       if (count > 0)
+               mod_zone_page_state(zone, NR_UNSTABLE_NFS, factor * count);
+}
+
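
[Note: a userspace model of the run-length batching above, with integer zone ids and a printf standing in for mod_zone_page_state(): n consecutive pages in one zone cost a single counter update instead of n:]

#include <stdio.h>

static void mod_counter(int zone, int delta)
{
	printf("zone %d: %+d\n", zone, delta);
}

static void batch_account(const int *zone_of_page, int page_count, int factor)
{
	int zone = -1;	/* sentinel: no current run (real zones >= 0) */
	int count = 0;

	for (int i = 0; i < page_count; i++) {
		if (zone_of_page[i] == zone) {
			++count;	/* extend the current run */
			continue;
		}
		if (count > 0)
			mod_counter(zone, factor * count);	/* flush run */
		zone = zone_of_page[i];
		count = 1;
	}
	if (count > 0)
		mod_counter(zone, factor * count);	/* final run */
}

int main(void)
{
	int zones[] = { 0, 0, 0, 1, 1, 0 };

	/* three updates instead of six: 0:+3, 1:+2, 0:+1 */
	batch_account(zones, 6, 1);
	return 0;
}
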
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+       unstable_page_accounting(desc, 1);
+}
+
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+       unstable_page_accounting(desc, -1);
+}
+
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ *
+ * If this function is called, the request should have been committed
+ * or req:rq_unstable must have been set; it implies that the unstable
+ * statistic have been added.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+       struct ptlrpc_bulk_desc *desc       = req->rq_bulk;
+       struct client_obd       *cli        = &req->rq_import->imp_obd->u.cli;
+       int                      page_count = desc->bd_iov_count;
+       long                     unstable_count;
+
+       LASSERT(page_count >= 0);
+       dec_unstable_page_accounting(desc);
+
+       unstable_count = atomic_long_sub_return(page_count,
+                                               &cli->cl_unstable_count);
+       LASSERT(unstable_count >= 0);
+
+       unstable_count = atomic_long_sub_return(page_count,
+                                          &cli->cl_cache->ccc_unstable_nr);
+       LASSERT(unstable_count >= 0);
+       if (unstable_count == 0)
+               wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+
+       if (osc_cache_too_much(cli))
+               (void)ptlrpcd_queue_work(cli->cl_lru_work);
+}
+
+/**
+ * "unstable" page accounting. See: osc_dec_unstable_pages.
+ */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+       struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+       struct client_obd       *cli  = &req->rq_import->imp_obd->u.cli;
+       long                     page_count = desc->bd_iov_count;
+
+       /* No unstable page tracking */
+       if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
+               return;
+
+       add_unstable_page_accounting(desc);
+       atomic_long_add(page_count, &cli->cl_unstable_count);
+       atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+       /* If the request has already been committed (i.e. brw_commit
+        * called via rq_commit_cb), we need to undo the unstable page
+        * increments we just performed because rq_commit_cb won't be
+        * called again. */
+       spin_lock(&req->rq_lock);
+       if (unlikely(req->rq_committed)) {
+               spin_unlock(&req->rq_lock);
+
+               osc_dec_unstable_pages(req);
+       } else {
+               req->rq_unstable = 1;
+               spin_unlock(&req->rq_lock);
+       }
+}
+
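
[Note: the rq_lock dance above closes a race between this increment and the commit callback. A sketch with a pthread mutex standing in for rq_lock — types and names illustrative: if the request is seen already committed, the accounting just added is undone on the spot, since the callback will not run again; otherwise rq_unstable tells the callback there is accounting to balance:]

#include <pthread.h>
#include <stdbool.h>

struct request {
	pthread_mutex_t lock;	/* stands in for rq_lock */
	bool committed;		/* rq_committed */
	bool unstable;		/* rq_unstable */
};

/* balancing decrement; osc_dec_unstable_pages() in the real code */
static void dec_unstable(struct request *req)
{
	(void)req;	/* ... subtract the page accounting ... */
}

static void inc_unstable(struct request *req)
{
	/* ... add the page accounting here, before taking the lock ... */

	pthread_mutex_lock(&req->lock);
	if (req->committed) {
		/* commit callback already ran and won't run again */
		pthread_mutex_unlock(&req->lock);
		dec_unstable(req);
	} else {
		/* tell the commit callback to balance our increment */
		req->unstable = true;
		pthread_mutex_unlock(&req->lock);
	}
}
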
+/**
+ * Check whether this OSC should piggyback the SOFT_SYNC flag on its next
+ * BRW RPC to the OST. This function is called for every BRW RPC, so it
+ * is critical to keep it fast.
+ */
+bool osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+       long unstable_nr, osc_unstable_count;
+
+       /* Can't check cli->cl_unstable_count, therefore, no soft limit */
+       if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
+               return false;
+
+       osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
+       unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);
+
+       CDEBUG(D_CACHE,
+              "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
+              cli->cl_import->imp_obd->obd_name, cli,
+              unstable_nr, osc_unstable_count);
+
+       /* If the LRU slots are in shortage - less than 25% remaining - AND
+        * this OSC has one full RPC window of unstable pages, it is a good
+        * time to piggyback a SOFT_SYNC flag.
+        * Note that the OST won't respond to the SOFT_SYNC request
+        * immediately, so active OSCs will have more chances to carry the
+        * flag; this is reasonable. */
+       return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
+              osc_unstable_count > cli->cl_max_pages_per_rpc *
+                                   cli->cl_max_rpcs_in_flight;
+}
+
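
[Note: worked numbers for the two thresholds above — an illustrative configuration, not defaults: with ccc_lru_max = 262144 slots the cache-wide test fires once more than 65536 pages (25%) are unstable, and with 256 pages per RPC and 8 RPCs in flight the per-OSC test fires above 2048 unstable pages:]

#include <stdbool.h>
#include <stdio.h>

/* model of the final test in osc_over_unstable_soft_limit() */
static bool over_soft_limit(long unstable_nr, long lru_max,
			    long osc_unstable, long pages_per_rpc,
			    long rpcs_in_flight)
{
	return unstable_nr > lru_max >> 2 &&
	       osc_unstable > pages_per_rpc * rpcs_in_flight;
}

int main(void)
{
	/* 70000 > 65536 cache-wide and 3000 > 256 * 8 on this OSC -> 1 */
	printf("%d\n", over_soft_limit(70000, 262144, 3000, 256, 8));
	return 0;
}
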
 /** @} osc */