* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
descr->cld_start = osc_index(opg);
descr->cld_end = osc_index(opg);
spin_lock(&hdr->coh_lock_guard);
- cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+ list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
/*
* Lock-less sub-lock has to be either in HELD state
* (when io is actively going on), or in CACHED state,
osc_lru_use(osc_cli(obj), opg);
spin_lock(&obj->oo_seatbelt);
- cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+ list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
opg->ops_submitter = current;
spin_unlock(&obj->oo_seatbelt);
}
policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
-static int osc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused, pgoff_t *max_index)
+static const char *osc_list(struct list_head *head)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct cl_lock *lock;
- int result = -ENODATA;
- ENTRY;
-
- *max_index = 0;
- lock = cl_lock_at_pgoff(env, slice->cpl_obj, osc_index(opg),
- NULL, 1, 0);
- if (lock != NULL) {
- *max_index = lock->cll_descr.cld_end;
- cl_lock_put(env, lock);
- result = 0;
- }
- RETURN(result);
-}
-
-static const char *osc_list(cfs_list_t *head)
-{
- return cfs_list_empty(head) ? "-" : "+";
+ return list_empty(head) ? "-" : "+";
}
static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
"1< %#x %d %u %s %s > "
- "2< "LPU64" %u %u %#x %#x | %p %p %p > "
+ "2< "LPD64" %u %u %#x %#x | %p %p %p > "
"3< %s %p %d %lu %d > "
"4< %d %d %d %lu %s | %s %s %s %s > "
"5< %s %s %s %s | %d %s | %d %s %s>\n",
spin_lock(&obj->oo_seatbelt);
if (opg->ops_submitter != NULL) {
- LASSERT(!cfs_list_empty(&opg->ops_inflight));
- cfs_list_del_init(&opg->ops_inflight);
+ LASSERT(!list_empty(&opg->ops_inflight));
+ list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
}
spin_unlock(&obj->oo_seatbelt);
EXIT;
}
-void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
- int from, int to)
+static void osc_page_clip(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int from, int to)
{
struct osc_page *opg = cl2osc_page(slice);
struct osc_async_page *oap = &opg->ops_oap;
static const struct cl_page_operations osc_page_ops = {
.cpo_print = osc_page_print,
.cpo_delete = osc_page_delete,
- .cpo_is_under_lock = osc_page_is_under_lock,
.cpo_clip = osc_page_clip,
.cpo_cancel = osc_page_cancel,
.cpo_flush = osc_page_flush
#endif
/* ops_inflight and ops_lru are the same field, but it doesn't
* hurt to initialize it twice :-) */
- CFS_INIT_LIST_HEAD(&opg->ops_inflight);
- CFS_INIT_LIST_HEAD(&opg->ops_lru);
+ INIT_LIST_HEAD(&opg->ops_inflight);
+ INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
if (page->cp_type == CPT_CACHEABLE && result == 0) {
return result;
}
-int osc_over_unstable_soft_limit(struct client_obd *cli)
-{
- long obd_upages, obd_dpages, osc_upages;
-
- /* Can't check cli->cl_unstable_count, therefore, no soft limit */
- if (cli == NULL)
- return 0;
-
- obd_upages = atomic_read(&obd_unstable_pages);
- obd_dpages = atomic_read(&obd_dirty_pages);
-
- osc_upages = atomic_read(&cli->cl_unstable_count);
-
- /* obd_max_dirty_pages is the max number of (dirty + unstable)
- * pages allowed at any given time. To simulate an unstable page
- * only limit, we subtract the current number of dirty pages
- * from this max. This difference is roughly the amount of pages
- * currently available for unstable pages. Thus, the soft limit
- * is half of that difference. Check osc_upages to ensure we don't
- * set SOFT_SYNC for OSCs without any outstanding unstable pages. */
- return osc_upages != 0 &&
- obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
-}
-
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
- if (osc_over_unstable_soft_limit(oap->oap_cli))
- oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
-
if (!client_is_remote(osc_export(obj)) &&
cfs_capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list);
+ long pages = atomic_long_read(&cli->cl_lru_in_list);
unsigned long budget;
- budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
+ LASSERT(cache != NULL);
+ budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
/* if it's going to run out of LRU slots, we should free some, but not
 * too much to maintain fairness among OSCs. */
- if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
if (pages >= budget)
return lru_shrink_max;
else if (pages >= budget / 2)
return lru_shrink_min;
+#if 0
} else if (pages >= budget * 2)
return lru_shrink_min;
+#endif
+ }
return 0;
}
RETURN(0);
}
-void osc_lru_add_batch(struct client_obd *cli, cfs_list_t *plist)
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
{
- CFS_LIST_HEAD(lru);
+ struct list_head lru = LIST_HEAD_INIT(lru);
struct osc_async_page *oap;
- int npages = 0;
+ long npages = 0;
- cfs_list_for_each_entry(oap, plist, oap_pending_item) {
+ list_for_each_entry(oap, plist, oap_pending_item) {
struct osc_page *opg = oap2osc_page(oap);
if (!opg->ops_in_lru)
continue;
++npages;
- LASSERT(cfs_list_empty(&opg->ops_lru));
- cfs_list_add(&opg->ops_lru, &lru);
+ LASSERT(list_empty(&opg->ops_lru));
+ list_add(&opg->ops_lru, &lru);
}
if (npages > 0) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
- cfs_list_splice_tail(&lru, &cli->cl_lru_list);
- atomic_sub(npages, &cli->cl_lru_busy);
- atomic_add(npages, &cli->cl_lru_in_list);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
+ list_splice_tail(&lru, &cli->cl_lru_list);
+ atomic_long_sub(npages, &cli->cl_lru_busy);
+ atomic_long_add(npages, &cli->cl_lru_in_list);
+ spin_unlock(&cli->cl_lru_list_lock);
/* XXX: May set force to be true for better performance */
if (osc_cache_too_much(cli))
static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
- cfs_list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
+ list_del_init(&opg->ops_lru);
+ atomic_long_dec(&cli->cl_lru_in_list);
}
/**
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
if (opg->ops_in_lru) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (!cfs_list_empty(&opg->ops_lru)) {
+ spin_lock(&cli->cl_lru_list_lock);
+ if (!list_empty(&opg->ops_lru)) {
__osc_lru_del(cli, opg);
} else {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
+ LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
+ atomic_long_dec(&cli->cl_lru_busy);
}
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
- atomic_inc(cli->cl_lru_left);
+ atomic_long_inc(cli->cl_lru_left);
/* this is a great place to release more LRU pages if
* this osc occupies too many LRU pages and kernel is
* stealing one of them. */
(void)ptlrpcd_queue_work(cli->cl_lru_work);
wake_up(&osc_lru_waitq);
} else {
- LASSERT(cfs_list_empty(&opg->ops_lru));
+ LASSERT(list_empty(&opg->ops_lru));
}
}
{
/* If page is being transferred for the first time,
* ops_lru should be empty */
- if (opg->ops_in_lru && !cfs_list_empty(&opg->ops_lru)) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
+ spin_lock(&cli->cl_lru_list_lock);
__osc_lru_del(cli, opg);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_busy);
+ spin_unlock(&cli->cl_lru_list_lock);
+ atomic_long_inc(&cli->cl_lru_busy);
}
}
}
/**
+ * Check if a cl_page can be released, i.e., it's not being used.
+ *
+ * If unstable accounting is turned on, a bulk transfer may hold one refcount
+ * for recovery, so we need to check the vmpage refcount as well; otherwise,
+ * even though we can destroy the cl_page, the corresponding vmpage cannot
+ * be reused.
+ */
+static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
+{
+ if (cl_page_in_use_noref(page))
+ return true;
+
+ if (cli->cl_cache->ccc_unstable_check) {
+ struct page *vmpage = cl_page_vmpage(page);
+
+ /* vmpage has two known users: cl_page and VM page cache */
+ if (page_count(vmpage) - page_mapcount(vmpage) > 2)
+ return true;
+ }
+ return false;
+}
+
+/**
 * Drop at most @target pages from the LRU.
*/
-int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- int target, bool force)
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force)
{
struct cl_io *io;
struct cl_object *clobj = NULL;
struct cl_page **pvec;
struct osc_page *opg;
+ long count = 0;
int maxscan = 0;
- int count = 0;
int index = 0;
int rc = 0;
ENTRY;
- LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
- if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
+ if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
RETURN(0);
if (!force) {
pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
io = &osc_env_info(env)->oti_io;
- client_obd_list_lock(&cli->cl_lru_list_lock);
- maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
- while (!cfs_list_empty(&cli->cl_lru_list)) {
+ spin_lock(&cli->cl_lru_list_lock);
+ maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
+ while (!list_empty(&cli->cl_lru_list)) {
struct cl_page *page;
bool will_free = false;
if (--maxscan < 0)
break;
- opg = cfs_list_entry(cli->cl_lru_list.next, struct osc_page,
- ops_lru);
+ opg = list_entry(cli->cl_lru_list.next, struct osc_page,
+ ops_lru);
page = opg->ops_cl.cpl_page;
- if (cl_page_in_use_noref(page)) {
- cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ if (lru_page_busy(cli, page)) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
}
struct cl_object *tmp = page->cp_obj;
cl_object_get(tmp);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
discard_pagevec(env, io, pvec, index);
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, clobj);
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
if (rc != 0)
break;
}
if (cl_page_own_try(env, io, page) == 0) {
- if (!cl_page_in_use_noref(page)) {
+ if (!lru_page_busy(cli, page)) {
/* remove it from lru list earlier to avoid
* lock contention */
__osc_lru_del(cli, opg);
}
if (!will_free) {
- cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
}
/* Don't discard and free the page with cl_lru_list held */
pvec[index++] = page;
if (unlikely(index == OTI_PVEC_SIZE)) {
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
discard_pagevec(env, io, pvec, index);
index = 0;
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
}
if (++count >= target)
break;
}
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
discard_pagevec(env, io, pvec, index);
atomic_dec(&cli->cl_lru_shrinkers);
if (count > 0) {
- atomic_add(count, cli->cl_lru_left);
+ atomic_long_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
RETURN(count > 0 ? count : rc);
}
-static inline int max_to_shrink(struct client_obd *cli)
-{
- return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
-}
-
-int osc_lru_reclaim(struct client_obd *cli)
+long osc_lru_reclaim(struct client_obd *cli)
{
struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
+ long rc = 0;
int max_scans;
- int rc = 0;
ENTRY;
LASSERT(cache != NULL);
- LASSERT(!cfs_list_empty(&cache->ccc_lru));
+ LASSERT(!list_empty(&cache->ccc_lru));
env = cl_env_nested_get(&nest);
if (IS_ERR(env))
if (rc == -EBUSY)
rc = 0;
- CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+ CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
cli->cl_import->imp_obd->obd_name, rc, cli);
GOTO(out, rc);
}
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen. */
spin_lock(&cache->ccc_lru_lock);
cache->ccc_lru_shrinkers++;
- cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+ list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- max_scans = atomic_read(&cache->ccc_users);
- while (--max_scans > 0 && !cfs_list_empty(&cache->ccc_lru)) {
- cli = cfs_list_entry(cache->ccc_lru.next, struct client_obd,
- cl_lru_osc);
+ max_scans = atomic_read(&cache->ccc_users) - 2;
+ while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
+ cli = list_entry(cache->ccc_lru.next, struct client_obd,
+ cl_lru_osc);
- CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
- cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+ list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
if (osc_cache_too_much(cli) > 0) {
spin_unlock(&cache->ccc_lru_lock);
out:
cl_env_nested_put(&nest, env);
- CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
+ CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
}
+/**
+ * osc_lru_reserve() is called to reserve an LRU slot for a cl_page.
+ *
+ * Usually the LRU slots are reserved in osc_io_iter_rw_init(), which should
+ * have reserved enough slots for the whole IO; only when the LRU slots are
+ * in extreme shortage does this function have to reserve a slot by itself.
+ */
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg)
{
goto out;
}
- LASSERT(atomic_read(cli->cl_lru_left) >= 0);
- while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
+ while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
/* run out of LRU spaces, try to drop some by itself */
rc = osc_lru_reclaim(cli);
cond_resched();
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0,
+ atomic_long_read(cli->cl_lru_left) > 0,
&lwi);
if (rc < 0)
break;
out:
if (rc >= 0) {
- atomic_inc(&cli->cl_lru_busy);
+ atomic_long_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
rc = 0;
}
RETURN(rc);
}
+/**
+ * Atomic operations are expensive. We accumulate the accounting for the
+ * same page zone to get better performance.
+ * In practice this can work pretty well because the pages in the same RPC
+ * are likely from the same page zone.
+ */
+static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ int factor)
+{
+ int page_count = desc->bd_iov_count;
+ void *zone = NULL;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < page_count; i++) {
+ void *pz = page_zone(desc->bd_iov[i].kiov_page);
+
+ if (likely(pz == zone)) {
+ ++count;
+ continue;
+ }
+
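+ /* zone changed: flush the count accumulated for the previous zone */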
+ if (count > 0) {
+ mod_zone_page_state(zone, NR_UNSTABLE_NFS,
+ factor * count);
+ count = 0;
+ }
+ zone = pz;
+ ++count;
+ }
+ if (count > 0)
+ mod_zone_page_state(zone, NR_UNSTABLE_NFS, factor * count);
+}
+
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+ unstable_page_accounting(desc, 1);
+}
+
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+ unstable_page_accounting(desc, -1);
+}
+
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ *
+ * If this function is called, the request should have been committed
+ * or req::rq_unstable must have been set; it implies that the unstable
+ * statistic has already been added.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ int page_count = desc->bd_iov_count;
+ long unstable_count;
+
+ LASSERT(page_count >= 0);
+ dec_unstable_page_accounting(desc);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_unstable_count);
+ LASSERT(unstable_count >= 0);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(unstable_count >= 0);
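+ /* if that was the last unstable page in the client cache, wake up any waiters */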
+ if (unstable_count == 0)
+ wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+
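+ /* these pages are no longer unstable; shrink the LRU if this OSC is over its budget */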
+ if (osc_cache_too_much(cli))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+}
+
+/**
+ * "unstable" page accounting. See: osc_dec_unstable_pages.
+ */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ long page_count = desc->bd_iov_count;
+
+ /* No unstable page tracking */
+ if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
+ return;
+
+ add_unstable_page_accounting(desc);
+ atomic_long_add(page_count, &cli->cl_unstable_count);
+ atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+ /* If the request has already been committed (i.e. brw_commit
+ * called via rq_commit_cb), we need to undo the unstable page
+ * increments we just performed because rq_commit_cb won't be
+ * called again. */
+ spin_lock(&req->rq_lock);
+ if (unlikely(req->rq_committed)) {
+ spin_unlock(&req->rq_lock);
+
+ osc_dec_unstable_pages(req);
+ } else {
+ req->rq_unstable = 1;
+ spin_unlock(&req->rq_lock);
+ }
+}
+
+/**
+ * Check whether this OSC should piggyback the SOFT_SYNC flag to the OST.
+ * This function will be called by every BRW RPC so it's critical
+ * to make this function fast.
+ */
+bool osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long unstable_nr, osc_unstable_count;
+
+ /* Can't check cli->cl_unstable_count, therefore, no soft limit */
+ if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
+ return false;
+
+ osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
+ unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);
+
+ CDEBUG(D_CACHE,
+ "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
+ cli->cl_import->imp_obd->obd_name, cli,
+ unstable_nr, osc_unstable_count);
+
+ /* If the LRU slots are in shortage - 25% remaining - AND this OSC
+ * has a full RPC window of unstable pages, it's a good time to
+ * piggyback a SOFT_SYNC flag.
+ * Note that the OST won't respond to the SOFT_SYNC request immediately,
+ * so active OSCs will have more chances to carry the flag; this is
+ * reasonable. */
+ return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
+ osc_unstable_count > cli->cl_max_pages_per_rpc *
+ cli->cl_max_rpcs_in_flight;
+}
+
/** @} osc */