* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include "osc_cl_internal.h"
-/** \addtogroup osc
- * @{
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
+static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
+static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
+ struct osc_page *opg);
+
+/** \addtogroup osc
+ * @{
*/
-/*
+/*
* Comment out osc_page_protected because it may sleep inside
* the client_obd_list_lock.
* client_obd_list_lock -> osc_ap_completion -> osc_completion ->
descr->cld_mode = mode;
descr->cld_start = page->cp_index;
descr->cld_end = page->cp_index;
- cfs_spin_lock(&hdr->coh_lock_guard);
+ spin_lock(&hdr->coh_lock_guard);
cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
/*
* Lock-less sub-lock has to be either in HELD state
break;
}
}
- cfs_spin_unlock(&hdr->coh_lock_guard);
+ spin_unlock(&hdr->coh_lock_guard);
}
return result;
}
struct osc_page *opg = cl2osc_page(slice);
CDEBUG(D_TRACE, "%p\n", opg);
LASSERT(opg->ops_lock == NULL);
- OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
static void osc_page_transfer_add(const struct lu_env *env,
struct osc_page *opg, enum cl_req_type crt)
{
- struct osc_object *obj;
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- obj = cl2osc(opg->ops_cl.cpl_obj);
- cfs_spin_lock(&obj->oo_seatbelt);
- cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = cfs_current();
- cfs_spin_unlock(&obj->oo_seatbelt);
+ /* ops_lru and ops_inflight share the same field, so take the page off
+ * the LRU first and then reuse the field for the inflight list. */
+ osc_lru_del(osc_cli(obj), opg, false);
+
+ spin_lock(&obj->oo_seatbelt);
+ cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+ opg->ops_submitter = cfs_current();
+ spin_unlock(&obj->oo_seatbelt);
}
static int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
{
+ struct osc_io *oio = osc_env_io(env);
struct osc_page *opg = cl2osc_page(slice);
int result;
ENTRY;
LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
osc_page_transfer_get(opg, "transfer\0cache");
- result = osc_queue_async_io(env, opg);
+ result = osc_queue_async_io(env, io, opg);
if (result != 0)
osc_page_transfer_put(env, opg);
else
osc_page_transfer_add(env, opg, CRT_WRITE);
+
+ /* For a sync write, the kernel will wait for this page to be flushed
+ * before osc_io_end() is called, so release it early.
+ * For mkwrite(), it is known there are no further pages. */
+ if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
+ if (oio->oi_active != NULL) {
+ osc_extent_release(env, oio->oi_active);
+ oio->oi_active = NULL;
+ }
+ }
+
RETURN(result);
}
olock = osc_lock_at(lock);
if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
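+ /* a non-positive ols_pageref means the lock can no longer be
+ * pinned by pages; undo the increment and report no lock */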
cfs_atomic_dec(&olock->ols_pageref);
- cl_lock_put(env, lock);
- rc = 1;
+ rc = -ENODATA;
} else {
+ cl_lock_get(lock);
opg->ops_lock = lock;
rc = 0;
}
struct cl_io *unused)
{
struct cl_lock *lock;
- int result;
+ int result = -ENODATA;
ENTRY;
lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
NULL, 1, 0);
- if (lock != NULL &&
- osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
- result = -EBUSY;
- else
- result = -ENODATA;
+ if (lock != NULL) {
+ if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
+ result = -EBUSY;
+ cl_lock_put(env, lock);
+ }
RETURN(result);
}
const struct cl_page_slice *slice,
int ioret)
{
- struct osc_page *opg = cl2osc_page(slice);
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- if (likely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
+ if (likely(opg->ops_lock))
+ osc_page_putref_lock(env, opg);
+ osc_lru_add(osc_cli(obj), opg);
+}
+
+static void osc_page_completion_write(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_object *obj = cl2osc(slice->cpl_obj);
+
+ osc_lru_add(osc_cli(obj), opg);
}
static int osc_page_fail(const struct lu_env *env,
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
- "1< %#x %d %u %s %s %s > "
+ "1< %#x %d %u %s %s > "
"2< "LPU64" %u %u %#x %#x | %p %p %p > "
- "3< %s %p %d %lu %d > "
- "4< %d %d %d %lu %s | %s %s %s %s > "
- "5< %s %s %s %s | %d %s %s | %d %s %s>\n",
+ "3< %s %p %d %lu %d > "
+ "4< %d %d %d %lu %s | %s %s %s %s > "
+ "5< %s %s %s %s | %d %s | %d %s %s>\n",
opg,
/* 1 */
oap->oap_magic, oap->oap_cmd,
oap->oap_interrupted,
osc_list(&oap->oap_pending_item),
- osc_list(&oap->oap_urgent_item),
osc_list(&oap->oap_rpc_item),
/* 2 */
oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
osc_list(&obj->oo_hp_ready_item),
osc_list(&obj->oo_write_item),
osc_list(&obj->oo_read_item),
- obj->oo_read_pages.oop_num_pending,
- osc_list(&obj->oo_read_pages.oop_pending),
- osc_list(&obj->oo_read_pages.oop_urgent),
- obj->oo_write_pages.oop_num_pending,
- osc_list(&obj->oo_write_pages.oop_pending),
- osc_list(&obj->oo_write_pages.oop_urgent));
+ cfs_atomic_read(&obj->oo_nr_reads),
+ osc_list(&obj->oo_reading_exts),
+ cfs_atomic_read(&obj->oo_nr_writes),
+ osc_list(&obj->oo_hp_exts),
+ osc_list(&obj->oo_urgent_exts));
}
static void osc_page_delete(const struct lu_env *env,
ENTRY;
CDEBUG(D_TRACE, "%p\n", opg);
osc_page_transfer_put(env, opg);
- rc = osc_teardown_async_page(obj, opg);
+ rc = osc_teardown_async_page(env, obj, opg);
if (rc) {
CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
"Trying to teardown failed: %d\n", rc);
LASSERT(0);
}
- cfs_spin_lock(&obj->oo_seatbelt);
- cfs_list_del_init(&opg->ops_inflight);
- cfs_spin_unlock(&obj->oo_seatbelt);
- EXIT;
+
+ spin_lock(&obj->oo_seatbelt);
+ if (opg->ops_submitter != NULL) {
+ LASSERT(!cfs_list_empty(&opg->ops_inflight));
+ cfs_list_del_init(&opg->ops_inflight);
+ opg->ops_submitter = NULL;
+ }
+ spin_unlock(&obj->oo_seatbelt);
+
+ osc_lru_del(osc_cli(obj), opg, true);
+ EXIT;
}
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
opg->ops_from = from;
opg->ops_to = to;
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
}
static int osc_page_cancel(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
+ struct osc_page *opg = cl2osc_page(slice);
int rc = 0;
LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
- client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
/* Check if the transfer for this page has completed,
* or was never queued at all. */
if (opg->ops_transfer_pinned)
/* FIXME: may not be interrupted.. */
rc = osc_cancel_async_page(env, opg);
LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
- client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
return rc;
}
+static int osc_page_flush(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ int rc = 0;
+ ENTRY;
+ rc = osc_flush_async_page(env, io, opg);
+ RETURN(rc);
+}
+
static const struct cl_page_operations osc_page_ops = {
.cpo_fini = osc_page_fini,
.cpo_print = osc_page_print,
.cpo_completion = osc_page_completion_read
},
[CRT_WRITE] = {
- .cpo_cache_add = osc_page_cache_add
- }
- },
- .cpo_clip = osc_page_clip,
- .cpo_cancel = osc_page_cancel
+ .cpo_cache_add = osc_page_cache_add,
+ .cpo_completion = osc_page_completion_write
+ }
+ },
+ .cpo_clip = osc_page_clip,
+ .cpo_cancel = osc_page_cancel,
+ .cpo_flush = osc_page_flush
};
-struct cl_page *osc_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct osc_object *osc = cl2osc(obj);
- struct osc_page *opg;
- int result;
+ struct osc_object *osc = cl2osc(obj);
+ struct osc_page *opg = cl_object_page_slice(obj, page);
+ int result;
- OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
- if (opg != NULL) {
- opg->ops_from = 0;
- opg->ops_to = CFS_PAGE_SIZE;
-
- result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
- if (result == 0) {
- struct osc_io *oio = osc_env_io(env);
- opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj,
- &osc_page_ops);
- }
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
+ opg->ops_from = 0;
+ opg->ops_to = CFS_PAGE_SIZE;
+
+ result = osc_prep_async_page(osc, opg, vmpage,
+ cl_offset(obj, page->cp_index));
+ if (result == 0) {
+ struct osc_io *oio = osc_env_io(env);
+ opg->ops_srvlock = osc_io_srvlock(oio);
+ cl_page_slice_add(page, &opg->ops_cl, obj,
+ &osc_page_ops);
+ }
+ /*
+ * Cannot assert osc_page_protected() here as read-ahead
+ * creates temporary pages outside of a lock.
+ */
#ifdef INVARIANT_CHECK
- opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
+ opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
- CFS_INIT_LIST_HEAD(&opg->ops_inflight);
- } else
- result = -ENOMEM;
- return ERR_PTR(result);
+ /* ops_inflight and ops_lru are the same field, but it doesn't
+ * hurt to initialize it twice :-) */
+ CFS_INIT_LIST_HEAD(&opg->ops_inflight);
+ CFS_INIT_LIST_HEAD(&opg->ops_lru);
+
+ /* reserve an LRU slot for this page */
+ if (page->cp_type == CPT_CACHEABLE && result == 0)
+ result = osc_lru_reserve(env, osc, opg);
+
+ return result;
}
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
*/
-void osc_io_submit_page(const struct lu_env *env,
- struct osc_io *oio, struct osc_page *opg,
- enum cl_req_type crt)
+void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
+ enum cl_req_type crt, int brw_flags)
{
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
+ struct osc_async_page *oap = &opg->ops_oap;
+ struct osc_object *obj = oap->oap_obj;
+
+ LINVRNT(osc_page_protected(env, opg,
+ crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
+
+ LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
+ "magic 0x%x\n", oap, oap->oap_magic);
+ LASSERT(oap->oap_async_flags & ASYNC_READY);
+ LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
+
+ oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
+ oap->oap_page_off = opg->ops_from;
+ oap->oap_count = opg->ops_to - opg->ops_from;
+ oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
+
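+ /* a local client whose process has CAP_SYS_RESOURCE may bypass
+ * quota enforcement for this IO */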
+ if (!client_is_remote(osc_export(obj)) &&
+ cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
+ oap->oap_cmd |= OBD_BRW_NOQUOTA;
+ }
+
+ opg->ops_submit_time = cfs_time_current();
+ osc_page_transfer_get(opg, "transfer\0imm");
+ osc_page_transfer_add(env, opg, crt);
+}
+
+/* --------------- LRU page management ------------------ */
+
+/* The OSC is a natural place to manage LRU pages because applications write
+ * data OSC by OSC. Ideally, an OSC that is used more frequently should
+ * occupy more LRU slots. On the other hand, we should avoid using up all LRU
+ * slots (client_obd::cl_lru_left), since a process would then have to sleep
+ * waiting for a free slot, which is very bad. The algorithm therefore
+ * requires each OSC to free slots voluntarily so that a reasonable number of
+ * free slots is available at all times.
+ */
+
+static CFS_DECL_WAITQ(osc_lru_waitq);
+static cfs_atomic_t osc_lru_waiters = CFS_ATOMIC_INIT(0);
+/* LRU pages are freed in batch mode. An OSC should free at least this
+ * many pages to avoid running out of LRU budget, and ... */
+static const int lru_shrink_min = 2 << (20 - CFS_PAGE_SHIFT); /* 2M */
+/* ... at most this many, otherwise shrinking would take too long to finish. */
+static const int lru_shrink_max = 32 << (20 - CFS_PAGE_SHIFT); /* 32M */
+
+/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
+ * slots should be freed aggressively. This way, slots are freed at a steady
+ * pace, which maintains fairness among OSCs.
+ *
+ * Returns the number of LRU pages that should be freed. */
+static int osc_cache_too_much(struct client_obd *cli)
+{
+ struct cl_client_cache *cache = cli->cl_cache;
+ int pages = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
+
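+ /* consider freeing at most half of this OSC's LRU pages per call */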
+ if (cfs_atomic_read(&osc_lru_waiters) > 0 &&
+ cfs_atomic_read(cli->cl_lru_left) < lru_shrink_max)
+ /* drop lru pages aggressively */
+ return min(pages, lru_shrink_max);
+
+ /* if we are about to run out of LRU slots, free some, but not too
+ * many, to maintain fairness among OSCs. */
+ if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ unsigned long tmp;
+
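+ /* tmp is this OSC's fair share of the client-wide LRU budget */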
+ tmp = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
+ if (pages > tmp)
+ return min(pages, lru_shrink_max);
+
+ return pages > lru_shrink_min ? lru_shrink_min : 0;
+ }
+
+ return 0;
+}
+
+/* Returns the number of pages in @pvec that were not discarded. */
+static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
+{
+ int count;
+ int i;
+
+ for (count = 0, i = 0; i < max_index; i++) {
+ struct cl_page *page = pvec[i];
+ if (cl_page_own_try(env, io, page) == 0) {
+ /* free an LRU page only if nobody is using it.
+ * This check is necessary to avoid freeing pages that have
+ * already been removed from the LRU and pinned for IO. */
+ if (!cl_page_in_use(page)) {
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ ++count;
+ }
+ cl_page_disown(env, io, page);
+ }
+ cl_page_put(env, page);
+ pvec[i] = NULL;
+ }
+ return max_index - count;
+}
+
+/**
+ * Drop at most @target pages from the LRU.
+ */
+int osc_lru_shrink(struct client_obd *cli, int target)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_object *clobj = NULL;
+ struct cl_page **pvec;
+ struct osc_page *opg;
+ int maxscan = 0;
+ int count = 0;
+ int index = 0;
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) >= 0);
+ if (cfs_atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ RETURN(0);
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ pvec = osc_env_info(env)->oti_pvec;
+ io = &osc_env_info(env)->oti_io;
+
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ cfs_atomic_inc(&cli->cl_lru_shrinkers);
+ maxscan = min(target << 1, cfs_atomic_read(&cli->cl_lru_in_list));
+ while (!cfs_list_empty(&cli->cl_lru_list)) {
+ struct cl_page *page;
+
+ if (--maxscan < 0)
+ break;
+
+ opg = cfs_list_entry(cli->cl_lru_list.next, struct osc_page,
+ ops_lru);
+ page = cl_page_top(opg->ops_cl.cpl_page);
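+ /* a page that still has users stays cached: rotate it to the
+ * tail of the list so it is revisited last */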
+ if (cl_page_in_use_noref(page)) {
+ cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ continue;
+ }
+
+ LASSERT(page->cp_obj != NULL);
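+ /* pages are discarded in per-object batches: on an object
+ * boundary, flush the pvec gathered for the previous object and
+ * start a fresh CIT_MISC io against the new one */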
+ if (clobj != page->cp_obj) {
+ struct cl_object *tmp = page->cp_obj;
+
+ cl_object_get(tmp);
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+ if (clobj != NULL) {
+ count -= discard_pagevec(env, io, pvec, index);
+ index = 0;
+
+ cl_io_fini(env, io);
+ cl_object_put(env, clobj);
+ clobj = NULL;
+ }
+
+ clobj = tmp;
+ io->ci_obj = clobj;
+ rc = cl_io_init(env, io, CIT_MISC, clobj);
+ if (rc != 0)
+ break;
+
+ ++maxscan;
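+ /* the object switch consumed one loop iteration without
+ * examining a page, so give the scan budget back */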
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ continue;
+ }
+
+ /* move this page to the end of the list as it will be discarded
+ * soon. The page will finally be removed from the LRU list in
+ * osc_page_delete(). */
+ cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+
+ /* it's okay to grab a refcount here without holding the lock,
+ * because anyone deleting the page must take cl_lru_list_lock
+ * first. */
+ cl_page_get(page);
+ pvec[index++] = page;
+ if (++count >= target)
+ break;
+
+ if (unlikely(index == OTI_PVEC_SIZE)) {
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+ count -= discard_pagevec(env, io, pvec, index);
+ index = 0;
+
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ }
+ }
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+ if (clobj != NULL) {
+ count -= discard_pagevec(env, io, pvec, index);
+
+ cl_io_fini(env, io);
+ cl_object_put(env, clobj);
+ }
+ cl_env_nested_put(&nest, env);
+
+ cfs_atomic_dec(&cli->cl_lru_shrinkers);
+ RETURN(count > 0 ? count : rc);
+}
+
+static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
+{
+ bool wakeup = false;
+
+ if (!opg->ops_in_lru)
+ return;
+
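+ /* the page is no longer pinned for IO: move it from the busy
+ * count back onto the LRU list */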
+ cfs_atomic_dec(&cli->cl_lru_busy);
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ if (cfs_list_empty(&opg->ops_lru)) {
+ cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ cfs_atomic_inc_return(&cli->cl_lru_in_list);
+ wakeup = cfs_atomic_read(&osc_lru_waiters) > 0;
+ }
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+ if (wakeup)
+ cfs_waitq_broadcast(&osc_lru_waitq);
+}
- osc_queue_sync_page(env, opg,
- crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
- osc_io_srvlock(oio) ? OBD_BRW_SRVLOCK : 0);
+/* Delete a page from the LRU list. A page can be deleted from the list for
+ * two reasons: it was redirtied, or it was removed from the page cache. */
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
+{
+ if (opg->ops_in_lru) {
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ if (!cfs_list_empty(&opg->ops_lru)) {
+ LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) > 0);
+ cfs_list_del_init(&opg->ops_lru);
+ cfs_atomic_dec(&cli->cl_lru_in_list);
+ if (!del)
+ cfs_atomic_inc(&cli->cl_lru_busy);
+ } else if (del) {
+ LASSERT(cfs_atomic_read(&cli->cl_lru_busy) > 0);
+ cfs_atomic_dec(&cli->cl_lru_busy);
+ }
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+ if (del) {
+ cfs_atomic_inc(cli->cl_lru_left);
+ /* this is a good place to release more LRU pages if this
+ * OSC occupies too many of them and the kernel is stealing
+ * one of its pages.
+ * cl_lru_shrinkers avoids a recursive call in case we are
+ * already in the context of osc_lru_shrink(). */
+ if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0)
+ osc_lru_shrink(cli, osc_cache_too_much(cli));
+ cfs_waitq_signal(&osc_lru_waitq);
+ }
+ } else {
+ LASSERT(cfs_list_empty(&opg->ops_lru));
+ }
+}
+
+static int osc_lru_reclaim(struct client_obd *cli)
+{
+ struct cl_client_cache *cache = cli->cl_cache;
+ struct client_obd *victim;
+ struct client_obd *tmp;
+ int rc;
+
+ LASSERT(cache != NULL);
+ LASSERT(!cfs_list_empty(&cache->ccc_lru));
+
+ rc = osc_lru_shrink(cli, lru_shrink_min);
+ if (rc > 0) {
+ CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+ cli->cl_import->imp_obd->obd_name, rc, cli);
+ return rc;
+ }
+
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+ cli->cl_import->imp_obd->obd_name, cli,
+ cfs_atomic_read(&cli->cl_lru_in_list),
+ cfs_atomic_read(&cli->cl_lru_busy));
+
+ /* Reclaim LRU slots from other client_obds, since this one cannot free
+ * enough from its own list. This should rarely happen. */
+ spin_lock(&cache->ccc_lru_lock);
+ cache->ccc_lru_shrinkers++;
+ cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
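+ /* move ourselves to the tail so the scan below visits every other
+ * OSC once and stops when it reaches this cli again */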
+ cfs_list_for_each_entry_safe(victim, tmp, &cache->ccc_lru, cl_lru_osc) {
+ if (victim == cli)
+ break;
+
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+ victim->cl_import->imp_obd->obd_name, victim,
+ cfs_atomic_read(&victim->cl_lru_in_list),
+ cfs_atomic_read(&victim->cl_lru_busy));
+
+ cfs_list_move_tail(&victim->cl_lru_osc, &cache->ccc_lru);
+ if (cfs_atomic_read(&victim->cl_lru_in_list) > 0)
+ break;
+ }
+ spin_unlock(&cache->ccc_lru_lock);
+ if (victim == cli) {
+ CDEBUG(D_CACHE, "%s: can't get any free LRU slots.\n",
+ cli->cl_import->imp_obd->obd_name);
+ return 0;
+ }
+
+ rc = osc_lru_shrink(victim,
+ min(cfs_atomic_read(&victim->cl_lru_in_list) >> 1,
+ lru_shrink_max));
+
+ CDEBUG(D_CACHE, "%s: Free %d pages from other cli: %p.\n",
+ cli->cl_import->imp_obd->obd_name, rc, victim);
+
+ return rc;
+}
+
+static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
+ struct osc_page *opg)
+{
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct client_obd *cli = osc_cli(obj);
+ int rc = 0;
+ ENTRY;
- osc_page_transfer_get(opg, "transfer\0imm");
- osc_page_transfer_add(env, opg, crt);
+ if (cli->cl_cache == NULL) /* shall not be in LRU */
+ RETURN(0);
+
+ LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
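+ /* atomically take one slot from the LRU budget; add_unless(-1, 0)
+ * fails only when no slots are left */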
+ while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ int gen;
+
+ /* out of LRU slots; try to free some of our own first */
+ rc = osc_lru_reclaim(cli);
+ if (rc < 0)
+ break;
+ if (rc > 0)
+ continue;
+
+ cfs_cond_resched();
+
+ /* slowest case: all cached pages are busy; notify other
+ * OSCs that we are short of LRU slots. */
+ cfs_atomic_inc(&osc_lru_waiters);
+
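+ /* sleep until a slot is freed, or until this OSC's LRU population
+ * changes (gen moves) so that shrinking it again may succeed */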
+ gen = cfs_atomic_read(&cli->cl_lru_in_list);
+ rc = l_wait_event(osc_lru_waitq,
+ cfs_atomic_read(cli->cl_lru_left) > 0 ||
+ (cfs_atomic_read(&cli->cl_lru_in_list) > 0 &&
+ gen != cfs_atomic_read(&cli->cl_lru_in_list)),
+ &lwi);
+
+ cfs_atomic_dec(&osc_lru_waiters);
+ if (rc < 0)
+ break;
+ }
+
+ if (rc >= 0) {
+ cfs_atomic_inc(&cli->cl_lru_busy);
+ opg->ops_in_lru = 1;
+ rc = 0;
+ }
+
+ RETURN(rc);
}
/** @} osc */