-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Implementation of cl_page for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
-/** \addtogroup osc osc @{ */
-
#define DEBUG_SUBSYSTEM S_OSC
+#include <lustre_osc.h>
-#include "osc_cl_internal.h"
-
-static int osc_page_is_dlocked(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int pending, int unref)
-{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- ldlm_policy_data_t *policy;
- ldlm_mode_t dlmmode;
- int flags;
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
-
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
-
- dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
- osc_lock_build_res(env, obj, resname);
- osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
- return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- dlmmode, &flags, NULL, lockh, unref);
-}
-
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- struct cl_object_header *hdr;
- struct cl_lock *scan;
- struct cl_page *page;
- struct cl_lock_descr *descr;
- int result;
-
- LINVRNT(!opg->ops_temp);
-
- result = osc_page_is_dlocked(env, opg, mode, 1, unref);
- if (result == 0) {
- /* maybe this page is a part of a lockless io? */
- hdr = cl_object_header(opg->ops_cl.cpl_obj);
- page = opg->ops_cl.cpl_page;
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_mode = mode;
- descr->cld_start = page->cp_index;
- descr->cld_end = page->cp_index;
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- /*
- * Lock-less sub-lock has to be either in HELD state
- * (when io is actively going on), or in CACHED state,
- * when top-lock is being unlocked:
- * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
- */
- if ((scan->cll_state == CLS_HELD ||
- scan->cll_state == CLS_CACHED) &&
- cl_lock_ext_match(&scan->cll_descr, descr)) {
- struct osc_lock *olck;
-
- olck = osc_lock_at(scan);
- result = osc_lock_is_lockless(olck);
- break;
- }
- }
- spin_unlock(&hdr->coh_lock_guard);
- }
- return result;
-}
+#include "osc_internal.h"
-/*****************************************************************************
- *
- * Page operations.
- *
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
+static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_page *opg);
+
+/** \addtogroup osc
+ * @{
*/
-static void osc_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
- CDEBUG(D_TRACE, "%p\n", opg);
- OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
-}
+/*
+ * Page operations.
+ */
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
- LASSERT(!opg->ops_transfer_pinned);
- cl_page_get(page);
- lu_ref_add_atomic(&page->cp_reference, label, page);
- opg->ops_transfer_pinned = 1;
+ LASSERT(!opg->ops_transfer_pinned);
+ cl_page_get(page);
+ lu_ref_add_atomic(&page->cp_reference, label, page);
+ opg->ops_transfer_pinned = 1;
}
static void osc_page_transfer_put(const struct lu_env *env,
- struct osc_page *opg)
+ struct osc_page *opg)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
- if (opg->ops_transfer_pinned) {
- lu_ref_del(&page->cp_reference, "transfer", page);
- opg->ops_transfer_pinned = 0;
- cl_page_put(env, page);
- }
+ if (opg->ops_transfer_pinned) {
+ opg->ops_transfer_pinned = 0;
+ lu_ref_del(&page->cp_reference, "transfer", page);
+ cl_page_put(env, page);
+ }
}
/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
struct osc_page *opg, enum cl_req_type crt)
{
- struct osc_object *obj;
-
- LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- obj = cl2osc(opg->ops_cl.cpl_obj);
- spin_lock(&obj->oo_seatbelt);
- list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = cfs_current();
- spin_unlock(&obj->oo_seatbelt);
+ /* ops_lru and ops_inflight share the same field, so take it from LRU
+ * first and then use it as inflight. */
+ osc_lru_use(osc_cli(obj), opg);
}
-static int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *_)
+int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- struct osc_io *oio = osc_env_io(env);
- int result;
- int brw_flags;
- int noquota = 0;
-
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
- ENTRY;
-
- /* Set the OBD_BRW_SRVLOCK before the page is queued. */
- brw_flags = oio->oi_lockless ? OBD_BRW_SRVLOCK : 0;
- if (!client_is_remote(osc_export(obj)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
- brw_flags |= OBD_BRW_NOQUOTA;
- noquota = OBD_BRW_NOQUOTA;
- }
-
- osc_page_transfer_get(opg, "transfer\0cache");
- result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
- &opg->ops_oap, OBD_BRW_WRITE | noquota,
- 0, 0, brw_flags, 0);
- if (result != 0)
- osc_page_transfer_put(env, opg);
- else
- osc_page_transfer_add(env, opg, CRT_WRITE);
- RETURN(result);
+ struct osc_page *opg = cl2osc_page(slice);
+ int result;
+ ENTRY;
+
+ osc_page_transfer_get(opg, "transfer\0cache");
+ result = osc_queue_async_io(env, io, opg);
+ if (result != 0)
+ osc_page_transfer_put(env, opg);
+ else
+ osc_page_transfer_add(env, opg, CRT_WRITE);
+
+ RETURN(result);
}
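+
+/*
+ * osc_index2policy() below converts a [start, end] page-index range into a
+ * byte-granular LDLM extent policy. For instance, assuming 4KiB pages,
+ * start = 0, end = 0 yields the extent [0, 4095], and start = 0, end = 255
+ * yields [0, 1048575].
+ */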
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end)
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj, pgoff_t start, pgoff_t end)
{
- memset(policy, 0, sizeof *policy);
- policy->l_extent.start = cl_offset(obj, start);
- policy->l_extent.end = cl_offset(obj, end + 1) - 1;
+ memset(policy, 0, sizeof *policy);
+ policy->l_extent.start = cl_offset(obj, start);
+ policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
-static int osc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *_)
+static const char *osc_list(struct list_head *head)
{
- struct cl_lock *lock;
- int result;
-
- ENTRY;
- lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
- NULL, 1, 0);
- if (lock != NULL) {
- cl_lock_put(env, lock);
- result = -EBUSY;
- } else
- result = -ENODATA;
- RETURN(result);
+ return list_empty(head) ? "-" : "+";
}
-static int osc_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
+ if (opg->ops_submit_time == 0)
+ return 0;
-static const char *osc_list(struct list_head *head)
-{
- return list_empty(head) ? "-" : "+";
+ return (cfs_time_current() - opg->ops_submit_time);
}
static int osc_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
struct osc_page *opg = cl2osc_page(slice);
struct osc_async_page *oap = &opg->ops_oap;
-
- return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
- "%#x %d %u %s %s %s %llu %u %#x %p %p %p %p %p\n",
- opg, oap->oap_magic, oap->oap_cmd,
+ struct osc_object *obj = cl2osc(slice->cpl_obj);
+ struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
+
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
+ "1< %#x %d %u %s %s > "
+ "2< %lld %u %u %#x %#x | %p %p %p > "
+ "3< %d %lu %d > "
+ "4< %d %d %d %lu %s | %s %s %s %s > "
+ "5< %s %s %s %s | %d %s | %d %s %s>\n",
+ opg, osc_index(opg),
+ /* 1 */
+ oap->oap_magic, oap->oap_cmd,
oap->oap_interrupted,
osc_list(&oap->oap_pending_item),
- osc_list(&oap->oap_urgent_item),
osc_list(&oap->oap_rpc_item),
- oap->oap_obj_off, oap->oap_page_off,
- oap->oap_async_flags, oap->oap_request,
- oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
- oap->oap_caller_data);
+ /* 2 */
+ oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
+ oap->oap_async_flags, oap->oap_brw_flags,
+ oap->oap_request, oap->oap_cli, obj,
+ /* 3 */
+ opg->ops_transfer_pinned,
+ osc_submit_duration(opg), opg->ops_srvlock,
+ /* 4 */
+ cli->cl_r_in_flight, cli->cl_w_in_flight,
+ cli->cl_max_rpcs_in_flight,
+ cli->cl_avail_grant,
+ osc_list(&cli->cl_cache_waiters),
+ osc_list(&cli->cl_loi_ready_list),
+ osc_list(&cli->cl_loi_hp_ready_list),
+ osc_list(&cli->cl_loi_write_list),
+ osc_list(&cli->cl_loi_read_list),
+ /* 5 */
+ osc_list(&obj->oo_ready_item),
+ osc_list(&obj->oo_hp_ready_item),
+ osc_list(&obj->oo_write_item),
+ osc_list(&obj->oo_read_item),
+ atomic_read(&obj->oo_nr_reads),
+ osc_list(&obj->oo_reading_exts),
+ atomic_read(&obj->oo_nr_writes),
+ osc_list(&obj->oo_hp_exts),
+ osc_list(&obj->oo_urgent_exts));
}
static void osc_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- struct osc_async_page *oap = &opg->ops_oap;
- int rc;
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+ int rc;
+
+ ENTRY;
+ CDEBUG(D_TRACE, "%p\n", opg);
+ osc_page_transfer_put(env, opg);
+ rc = osc_teardown_async_page(env, obj, opg);
+ if (rc) {
+ CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
+ "Trying to teardown failed: %d\n", rc);
+ LASSERT(0);
+ }
+
+ osc_lru_del(osc_cli(obj), opg);
+
+ if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
+ void *value;
+
+ spin_lock(&obj->oo_tree_lock);
+ value = radix_tree_delete(&obj->oo_tree, osc_index(opg));
+ if (value != NULL)
+ --obj->oo_npages;
+ spin_unlock(&obj->oo_tree_lock);
+
+ LASSERT(ergo(value != NULL, value == opg));
+ }
+
+ EXIT;
+}
- LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
+static void osc_page_clip(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int from, int to)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_async_page *oap = &opg->ops_oap;
+
+ opg->ops_from = from;
+ opg->ops_to = to;
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
+}
- ENTRY;
- CDEBUG(D_TRACE, "%p\n", opg);
- osc_page_transfer_put(env, opg);
- rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
- LASSERTF(rc == 0, "%i\n", rc);
- spin_lock(&obj->oo_seatbelt);
- list_del_init(&opg->ops_inflight);
- spin_unlock(&obj->oo_seatbelt);
- EXIT;
+static int osc_page_cancel(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ int rc = 0;
+
+	/* Check if the transfer for this page has completed, or was
+	 * never queued at all. */
+ if (opg->ops_transfer_pinned)
+ /* FIXME: may not be interrupted.. */
+ rc = osc_cancel_async_page(env, opg);
+ LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
+ return rc;
}
-void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
- int from, int to)
+static int osc_page_flush(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
+ struct osc_page *opg = cl2osc_page(slice);
+ int rc = 0;
+ ENTRY;
+ rc = osc_flush_async_page(env, io, opg);
+ RETURN(rc);
+}
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+static const struct cl_page_operations osc_page_ops = {
+ .cpo_print = osc_page_print,
+ .cpo_delete = osc_page_delete,
+ .cpo_clip = osc_page_clip,
+ .cpo_cancel = osc_page_cancel,
+ .cpo_flush = osc_page_flush
+};
- opg->ops_from = from;
- opg->ops_to = to;
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, pgoff_t index)
+{
+ struct osc_object *osc = cl2osc(obj);
+ struct osc_page *opg = cl_object_page_slice(obj, page);
+ struct osc_io *oio = osc_env_io(env);
+ int result;
+
+ opg->ops_from = 0;
+ opg->ops_to = PAGE_SIZE;
+
+ INIT_LIST_HEAD(&opg->ops_lru);
+
+ result = osc_prep_async_page(osc, opg, page->cp_vmpage,
+ cl_offset(obj, index));
+ if (result != 0)
+ return result;
+
+ opg->ops_srvlock = osc_io_srvlock(oio);
+ cl_page_slice_add(page, &opg->ops_cl, obj, index,
+ &osc_page_ops);
+
+ /* reserve an LRU space for this page */
+ if (page->cp_type == CPT_CACHEABLE) {
+ result = osc_lru_alloc(env, osc_cli(osc), opg);
+ if (result == 0) {
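+			/* radix_tree_preload() pre-allocates tree nodes so
+			 * that the insertion below cannot fail with -ENOMEM
+			 * while oo_tree_lock is held */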
+ result = radix_tree_preload(GFP_NOFS);
+ if (result == 0) {
+ spin_lock(&osc->oo_tree_lock);
+ result = radix_tree_insert(&osc->oo_tree,
+ index, opg);
+ if (result == 0)
+ ++osc->oo_npages;
+ spin_unlock(&osc->oo_tree_lock);
+
+ radix_tree_preload_end();
+ }
+ }
+ }
+
+ return result;
}
-static int osc_page_cancel(const struct lu_env *env,
- const struct cl_page_slice *slice)
+/**
+ * Helper function called by osc_io_submit() for every page in an immediate
+ * transfer (i.e., transferred synchronously).
+ */
+void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
+ enum cl_req_type crt, int brw_flags)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
- int rc = 0;
+ struct osc_async_page *oap = &opg->ops_oap;
+
+ LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
+ "magic 0x%x\n", oap, oap->oap_magic);
+ LASSERT(oap->oap_async_flags & ASYNC_READY);
+ LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
+
+ oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
+ oap->oap_page_off = opg->ops_from;
+ oap->oap_count = opg->ops_to - opg->ops_from;
+ oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
+
+ if (cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
+ oap->oap_cmd |= OBD_BRW_NOQUOTA;
+ }
+
+ opg->ops_submit_time = cfs_time_current();
+ osc_page_transfer_get(opg, "transfer\0imm");
+ osc_page_transfer_add(env, opg, crt);
+}
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+/* --------------- LRU page management ------------------ */
+
+/* OSC is a natural place to manage LRU pages because applications tend to
+ * write to one OSC at a time. Ideally, an OSC that is used more frequently
+ * should occupy more LRU slots. On the other hand, we should avoid using up
+ * all LRU slots (client_obd::cl_lru_left), otherwise processes have to sleep
+ * waiting for free slots - that would be very bad, so the algorithm requires
+ * each OSC to free slots voluntarily in order to maintain a reasonable number
+ * of free slots at all times.
+ */
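+
+/*
+ * For reference, the counters used throughout this section: cl_lru_left is
+ * the shared pool of free LRU slots, cl_lru_in_list counts idle pages on
+ * this OSC's cl_lru_list, cl_lru_busy counts pages that are reserved or in
+ * use, and ccc_unstable_nr counts pages written to the OST but not yet
+ * committed there.
+ */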
- client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
- /* Check if the transferring against this page
- * is completed, or not even queued. */
- if (opg->ops_transfer_pinned)
- /* FIXME: may not be interrupted.. */
- rc = osc_oap_interrupted(env, oap);
- LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
- client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
- return rc;
+static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
+
+/**
+ * LRU pages are freed in batch mode. OSC should at least free this
+ * number of pages to avoid running out of LRU slots.
+ */
+static inline int lru_shrink_min(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * 2;
}
-static const struct cl_page_operations osc_page_ops = {
- .cpo_fini = osc_page_fini,
- .cpo_print = osc_page_print,
- .cpo_delete = osc_page_delete,
- .cpo_is_under_lock = osc_page_is_under_lock,
- .io = {
- [CRT_READ] = {
- .cpo_cache_add = osc_page_fail
- },
- [CRT_WRITE] = {
- .cpo_cache_add = osc_page_cache_add
- }
- },
- .cpo_clip = osc_page_clip,
- .cpo_cancel = osc_page_cancel
-};
+/**
+ * Free at most this number of pages, otherwise it will take too long to
+ * finish.
+ */
+static inline int lru_shrink_max(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+}
-static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
+/**
+ * Check if we can free LRU slots from this OSC. If there are LRU waiters,
+ * we should free slots aggressively. In this way, slots are freed at a
+ * steady pace, which maintains fairness among OSCs.
+ *
+ * Return how many LRU pages should be freed.
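+ *
+ * Worked example (numbers illustrative): with cache->ccc_lru_max = 1M slots
+ * and 6 ccc_users, the per-OSC budget is 1M / 4 = 256K slots. Once fewer
+ * than 256K (ccc_lru_max >> 2) free slots remain globally, an OSC caching
+ * at least the full budget frees lru_shrink_max() pages, and one caching at
+ * least half of it frees lru_shrink_min() pages.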
+ */
+static int osc_cache_too_much(struct client_obd *cli)
+{
+ struct cl_client_cache *cache = cli->cl_cache;
+ long pages = atomic_long_read(&cli->cl_lru_in_list);
+ unsigned long budget;
+
+ LASSERT(cache != NULL);
+ budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
+
+	/* if we're about to run out of LRU slots, we should free some, but
+	 * not too many, in order to maintain fairness among OSCs. */
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
+ if (pages >= budget)
+ return lru_shrink_max(cli);
+ else if (pages >= budget / 2)
+ return lru_shrink_min(cli);
+ } else {
+ time64_t duration = ktime_get_real_seconds();
+ long timediff;
+
+ /* knock out pages by duration of no IO activity */
+ duration -= cli->cl_lru_last_used;
+ /*
+		 * The difference shouldn't be more than 70 years,
+		 * so we can safely cast it to a long. Round to
+		 * approximately 1 minute.
+ */
+ timediff = (long)(duration >> 6);
+ if (timediff > 0 && pages >= budget / timediff)
+ return lru_shrink_min(cli);
+ }
+ return 0;
+}
+
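+/**
+ * ptlrpcd work callback: shrink this OSC's LRU when it caches too many
+ * pages, and re-queue itself if a full batch was freed, since more work
+ * may remain.
+ */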
+int lru_queue_work(const struct lu_env *env, void *data)
{
- struct osc_page *opg = data;
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
- int result;
+ struct client_obd *cli = data;
+ int count;
+
+ CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
+ count = osc_cache_too_much(cli);
+ if (count > 0) {
+ int rc = osc_lru_shrink(env, cli, count, false);
+
+ CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
+ cli_name(cli), rc, count);
+ if (rc >= count) {
+ CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
+ ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+ }
+
+ RETURN(0);
+}
- LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
+{
+ struct list_head lru = LIST_HEAD_INIT(lru);
+ struct osc_async_page *oap;
+ long npages = 0;
+
+ list_for_each_entry(oap, plist, oap_pending_item) {
+ struct osc_page *opg = oap2osc_page(oap);
+
+ if (!opg->ops_in_lru)
+ continue;
+
+ ++npages;
+ LASSERT(list_empty(&opg->ops_lru));
+ list_add(&opg->ops_lru, &lru);
+ }
+
+ if (npages > 0) {
+ spin_lock(&cli->cl_lru_list_lock);
+ list_splice_tail(&lru, &cli->cl_lru_list);
+ atomic_long_sub(npages, &cli->cl_lru_busy);
+ atomic_long_add(npages, &cli->cl_lru_in_list);
+ cli->cl_lru_last_used = ktime_get_real_seconds();
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ if (waitqueue_active(&osc_lru_waitq))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+}
- ENTRY;
- result = cl_page_make_ready(env, page, CRT_WRITE);
- RETURN(result);
+static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
+ list_del_init(&opg->ops_lru);
+ atomic_long_dec(&cli->cl_lru_in_list);
}
-static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
+/**
+ * Page is being destroyed. The page may not be on the LRU list if the
+ * transfer never finished (e.g., an error occurred).
+ */
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
- struct cl_page *page;
- struct osc_page *osc = data;
- struct cl_object *obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ if (opg->ops_in_lru) {
+ spin_lock(&cli->cl_lru_list_lock);
+ if (!list_empty(&opg->ops_lru)) {
+ __osc_lru_del(cli, opg);
+ } else {
+ LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
+ atomic_long_dec(&cli->cl_lru_busy);
+ }
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ atomic_long_inc(cli->cl_lru_left);
+		/* this is a great place to release more LRU pages if
+		 * this osc occupies too many LRU pages and the kernel
+		 * is stealing one of them. */
+ if (osc_cache_too_much(cli)) {
+ CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+ wake_up(&osc_lru_waitq);
+ } else {
+ LASSERT(list_empty(&opg->ops_lru));
+ }
+}
- int result;
- loff_t kms;
+/**
+ * Delete the page from the LRU list because it is being used (redirtied).
+ */
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
+{
+	/* If the page is being transferred for the first time,
+	 * ops_lru should be empty */
+ if (opg->ops_in_lru) {
+ spin_lock(&cli->cl_lru_list_lock);
+ if (!list_empty(&opg->ops_lru)) {
+ __osc_lru_del(cli, opg);
+ atomic_long_inc(&cli->cl_lru_busy);
+ }
+ spin_unlock(&cli->cl_lru_list_lock);
+ }
+}
- LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));
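+
+/* Discard a batch of pages gathered by osc_lru_shrink(). Each page is owned
+ * by @io: delete it from the object, discard its contents, disown it, and
+ * drop the reference taken when it was queued into the pagevec. */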
+static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
+{
+ int i;
- /* readpage queues with _COUNT_STABLE, shouldn't get here. */
- LASSERT(!(cmd & OBD_BRW_READ));
- LASSERT(osc != NULL);
- page = osc->ops_cl.cpl_page;
- obj = osc->ops_cl.cpl_obj;
+ for (i = 0; i < max_index; i++) {
+ struct cl_page *page = pvec[i];
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result < 0)
- return result;
- kms = attr->cat_kms;
- if (cl_offset(obj, page->cp_index) >= kms)
- /* catch race with truncate */
- return 0;
- else if (cl_offset(obj, page->cp_index + 1) > kms)
- /* catch sub-page write at end of file */
- return kms % CFS_PAGE_SIZE;
- else
- return CFS_PAGE_SIZE;
-}
-
-static int osc_completion(const struct lu_env *env,
- void *data, int cmd, struct obdo *oa, int rc)
-{
- struct osc_page *opg = data;
- struct osc_async_page *oap = &opg->ops_oap;
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- enum cl_req_type crt;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
- LINVRNT(cl_page_is_vmlocked(env, page));
-
- ENTRY;
-
- cmd &= ~OBD_BRW_NOQUOTA;
- LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
- LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
- LASSERT(opg->ops_transfer_pinned);
-
- /*
- * page->cp_req can be NULL if io submission failed before
- * cl_req was allocated.
- */
- if (page->cp_req != NULL)
- cl_req_page_done(env, page);
- LASSERT(page->cp_req == NULL);
-
- /* As the transfer for this page is being done, clear the flags */
- oap->oap_async_flags = 0;
-
- crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
- /* Clear opg->ops_transfer_pinned before VM lock is released. */
- opg->ops_transfer_pinned = 0;
-
- spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter != NULL);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- spin_unlock(&obj->oo_seatbelt);
-
- cl_page_completion(env, page, crt, rc);
-
- /* statistic */
- if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
- struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
- struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
- int bytes = opg->ops_to - opg->ops_from;
-
- if (crt == CRT_READ)
- stats->os_lockless_reads += bytes;
- else
- stats->os_lockless_writes += bytes;
+ LASSERT(cl_page_is_owned(page, io));
+ cl_page_delete(env, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ cl_page_put(env, page);
+
+ pvec[i] = NULL;
}
+}
- /*
- * This has to be the last operation with the page, as locks are
- * released in cl_page_completion() and nothing except for the
- * reference counter protects page from concurrent reclaim.
- */
- lu_ref_del(&page->cp_reference, "transfer", page);
- /*
- * As page->cp_obj is pinned by a reference from page->cp_req, it is
- * safe to call cl_page_put() without risking object destruction in a
- * non-blocking context.
- */
- cl_page_put(env, page);
- RETURN(0);
-}
-
-const static struct obd_async_page_ops osc_async_page_ops = {
- .ap_make_ready = osc_make_ready,
- .ap_refresh_count = osc_refresh_count,
- .ap_completion = osc_completion
-};
+/**
+ * Check if a cl_page can be released, i.e, it's not being used.
+ *
+ * If unstable page accounting is turned on, a bulk transfer may hold one
+ * refcount for recovery, so we need to check the vmpage refcount as well;
+ * otherwise, even though we could destroy the cl_page, the corresponding
+ * vmpage couldn't be reused.
+ */
+static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
+{
+ if (cl_page_in_use_noref(page))
+ return true;
+
+ if (cli->cl_cache->ccc_unstable_check) {
+ struct page *vmpage = cl_page_vmpage(page);
-struct cl_page *osc_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
-{
- struct osc_object *osc = cl2osc(obj);
- struct osc_page *opg;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
- if (opg != NULL) {
- void *oap = &opg->ops_oap;
-
- opg->ops_from = 0;
- opg->ops_to = CFS_PAGE_SIZE;
-
- result = osc_prep_async_page(osc_export(osc),
- NULL, osc->oo_oinfo, vmpage,
- cl_offset(obj, page->cp_index),
- &osc_async_page_ops,
- opg, (void **)&oap, 1, NULL);
- if (result == 0)
- cl_page_slice_add(page, &opg->ops_cl, obj,
- &osc_page_ops);
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
-#ifdef INVARIANT_CHECK
- opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
-#endif
- CFS_INIT_LIST_HEAD(&opg->ops_inflight);
- } else
- result = -ENOMEM;
- return ERR_PTR(result);
+		/* a vmpage has two known users: cl_page and the VM page cache */
+ if (page_count(vmpage) - page_mapcount(vmpage) > 2)
+ return true;
+ }
+ return false;
}
/**
- * Helper function called by osc_io_submit() for every page in an immediate
- * transfer (i.e., transferred synchronously).
+ * Drop at most \a target pages from the LRU.
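+ *
+ * Pages are scanned oldest-first; busy pages are rotated to the tail of the
+ * list, while reclaimable pages are batched into a pagevec and discarded
+ * outside cl_lru_list_lock to keep lock hold times short.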
*/
-void osc_io_submit_page(const struct lu_env *env,
- struct osc_io *oio, struct osc_page *opg,
- enum cl_req_type crt)
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force)
{
- struct osc_async_page *oap = &opg->ops_oap;
- struct client_obd *cli = oap->oap_cli;
-
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
-
- oap->oap_page_off = opg->ops_from;
- oap->oap_count = opg->ops_to - opg->ops_from;
- oap->oap_brw_flags |= OBD_BRW_SYNC;
- if (oio->oi_lockless)
- oap->oap_brw_flags |= OBD_BRW_SRVLOCK;
-
- oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
- if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
- oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
- oap->oap_cmd |= OBD_BRW_NOQUOTA;
- }
+ struct cl_io *io;
+ struct cl_object *clobj = NULL;
+ struct cl_page **pvec;
+ struct osc_page *opg;
+ long count = 0;
+ int maxscan = 0;
+ int index = 0;
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
+ if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ RETURN(0);
+
+ CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
+ cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
+ if (!force) {
+ if (atomic_read(&cli->cl_lru_shrinkers) > 0)
+ RETURN(-EBUSY);
+
+ if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+ atomic_dec(&cli->cl_lru_shrinkers);
+ RETURN(-EBUSY);
+ }
+ } else {
+ atomic_inc(&cli->cl_lru_shrinkers);
+ }
+
+ pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
+ io = &osc_env_info(env)->oti_io;
+
+ spin_lock(&cli->cl_lru_list_lock);
+ if (force)
+ cli->cl_lru_reclaim++;
+ maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
+ while (!list_empty(&cli->cl_lru_list)) {
+ struct cl_page *page;
+ bool will_free = false;
+
+ if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
+ break;
+
+ if (--maxscan < 0)
+ break;
+
+ opg = list_entry(cli->cl_lru_list.next, struct osc_page,
+ ops_lru);
+ page = opg->ops_cl.cpl_page;
+ if (lru_page_busy(cli, page)) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ continue;
+ }
+
+ LASSERT(page->cp_obj != NULL);
+ if (clobj != page->cp_obj) {
+ struct cl_object *tmp = page->cp_obj;
+
+ cl_object_get(tmp);
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ if (clobj != NULL) {
+ discard_pagevec(env, io, pvec, index);
+ index = 0;
+
+ cl_io_fini(env, io);
+ cl_object_put(env, clobj);
+ clobj = NULL;
+ }
+
+ clobj = tmp;
+ io->ci_obj = clobj;
+ io->ci_ignore_layout = 1;
+ rc = cl_io_init(env, io, CIT_MISC, clobj);
+
+ spin_lock(&cli->cl_lru_list_lock);
+
+ if (rc != 0)
+ break;
+
+ ++maxscan;
+ continue;
+ }
+
+ if (cl_page_own_try(env, io, page) == 0) {
+ if (!lru_page_busy(cli, page)) {
+ /* remove it from lru list earlier to avoid
+ * lock contention */
+ __osc_lru_del(cli, opg);
+ opg->ops_in_lru = 0; /* will be discarded */
+
+ cl_page_get(page);
+ will_free = true;
+ } else {
+ cl_page_disown(env, io, page);
+ }
+ }
+
+ if (!will_free) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ continue;
+ }
+
+		/* Don't discard or free the page while cl_lru_list_lock is held */
+ pvec[index++] = page;
+ if (unlikely(index == OTI_PVEC_SIZE)) {
+ spin_unlock(&cli->cl_lru_list_lock);
+ discard_pagevec(env, io, pvec, index);
+ index = 0;
+
+ spin_lock(&cli->cl_lru_list_lock);
+ }
+
+ if (++count >= target)
+ break;
+ }
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ if (clobj != NULL) {
+ discard_pagevec(env, io, pvec, index);
+
+ cl_io_fini(env, io);
+ cl_object_put(env, clobj);
+ }
+
+ atomic_dec(&cli->cl_lru_shrinkers);
+ if (count > 0) {
+ atomic_long_add(count, cli->cl_lru_left);
+ wake_up_all(&osc_lru_waitq);
+ }
+ RETURN(count > 0 ? count : rc);
+}
+
+/**
+ * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
+ * \a npages of LRU slots. For performance reasons it's better to drop LRU
+ * pages in batches, so the actual number reclaimed is adjusted up to at
+ * least cl_max_pages_per_rpc.
+ */
+static long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
+{
+ struct lu_env *env;
+ struct cl_client_cache *cache = cli->cl_cache;
+ int max_scans;
+ __u16 refcheck;
+ long rc = 0;
+ ENTRY;
+
+ LASSERT(cache != NULL);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(rc);
+
+ npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
+ CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
+ cli_name(cli), npages);
+ rc = osc_lru_shrink(env, cli, npages, true);
+ if (rc >= npages) {
+ CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
+ cli_name(cli), rc, npages);
+ if (osc_cache_too_much(cli) > 0)
+ ptlrpcd_queue_work(cli->cl_lru_work);
+ GOTO(out, rc);
+ } else if (rc > 0) {
+ npages -= rc;
+ }
+
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
+ cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy), npages);
+
+	/* Reclaim LRU slots from other client_obds since this one can't free
+	 * enough of its own. This should rarely happen. */
+ spin_lock(&cache->ccc_lru_lock);
+ LASSERT(!list_empty(&cache->ccc_lru));
+
+ cache->ccc_lru_shrinkers++;
+ list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+
+ max_scans = atomic_read(&cache->ccc_users) - 2;
+ while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
+ cli = list_entry(cache->ccc_lru.next, struct client_obd,
+ cl_lru_osc);
+
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
+ cli_name(cli), cli,
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
+
+ list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+ if (osc_cache_too_much(cli) > 0) {
+ spin_unlock(&cache->ccc_lru_lock);
+
+ rc = osc_lru_shrink(env, cli, npages, true);
+ spin_lock(&cache->ccc_lru_lock);
+ if (rc >= npages)
+ break;
+ if (rc > 0)
+ npages -= rc;
+ }
+ }
+ spin_unlock(&cache->ccc_lru_lock);
+
+out:
+ cl_env_put(env, &refcheck);
+ CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
+ cli_name(cli), cli, rc);
+ return rc;
+}
+
+/**
+ * osc_lru_alloc() is called to allocate an LRU slot for a cl_page.
+ *
+ * Usually the LRU slots are reserved in osc_io_iter_rw_init(), which should
+ * have reserved enough slots for the whole IO; only when LRU slots are in
+ * extreme shortage does allocation fall back to this per-page path.
+ */
+static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_page *opg)
+{
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct osc_io *oio = osc_env_io(env);
+ int rc = 0;
+ ENTRY;
+
+ if (cli->cl_cache == NULL) /* shall not be in LRU */
+ RETURN(0);
+
+ if (oio->oi_lru_reserved > 0) {
+ --oio->oi_lru_reserved;
+ goto out;
+ }
+
+ LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
+ while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
+		/* ran out of LRU slots, try to free some ourselves */
+ rc = osc_lru_reclaim(cli, 1);
+ if (rc < 0)
+ break;
+ if (rc > 0)
+ continue;
+
+ cond_resched();
+ rc = l_wait_event(osc_lru_waitq,
+ atomic_long_read(cli->cl_lru_left) > 0,
+ &lwi);
+ if (rc < 0)
+ break;
+ }
+
+out:
+ if (rc >= 0) {
+ atomic_long_inc(&cli->cl_lru_busy);
+ opg->ops_in_lru = 1;
+ rc = 0;
+ }
+
+ RETURN(rc);
+}
+
+/**
+ * osc_lru_reserve() is called to reserve enough LRU slots for I/O.
+ *
+ * The benefit of doing this is to reduce contention against atomic counter
+ * cl_lru_left by changing it from per-page access to per-IO access.
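+ *
+ * For example (sizes illustrative): a 1MiB write with 4KiB pages reserves
+ * 256 slots with a single cmpxchg on cl_lru_left instead of 256 per-page
+ * atomic decrements in osc_lru_alloc().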
+ */
+unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages)
+{
+ unsigned long reserved = 0;
+ unsigned long max_pages;
+ unsigned long c;
+
+	/* reserve at most a full RPC window to avoid a thread accidentally
+	 * consuming too many LRU slots */
+ max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+ if (npages > max_pages)
+ npages = max_pages;
+
+ c = atomic_long_read(cli->cl_lru_left);
+ if (c < npages && osc_lru_reclaim(cli, npages) > 0)
+ c = atomic_long_read(cli->cl_lru_left);
+ while (c >= npages) {
+ if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ reserved = npages;
+ break;
+ }
+ c = atomic_long_read(cli->cl_lru_left);
+ }
+ if (atomic_long_read(cli->cl_lru_left) < max_pages) {
+ /* If there aren't enough pages in the per-OSC LRU then
+ * wake up the LRU thread to try and clear out space, so
+ * we don't block if pages are being dirtied quickly. */
+ CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
+ cli_name(cli), atomic_long_read(cli->cl_lru_left),
+ max_pages);
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+
+ return reserved;
+}
+
+/**
+ * osc_lru_unreserve() is called to unreserve LRU slots.
+ *
+ * LRU slots reserved by osc_lru_reserve() may be left unused for several
+ * reasons, such as the page already existing or an I/O error. Those unused
+ * slots should be released by calling this function.
+ */
+void osc_lru_unreserve(struct client_obd *cli, unsigned long npages)
+{
+ atomic_long_add(npages, cli->cl_lru_left);
+ wake_up_all(&osc_lru_waitq);
+}
+
+/**
+ * Atomic operations are expensive. We accumulate the accounting for the
+ * same page zone to get better performance.
+ * In practice this works well because the pages in the same RPC are likely
+ * to come from the same page zone.
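+ *
+ * As an illustration (numbers hypothetical): a 256-page bulk whose pages
+ * all belong to one zone results in a single mod_zone_page_state() call
+ * with count 256 instead of 256 individual updates.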
+ */
+static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa,
+ int factor)
+{
+ int page_count;
+ void *zone = NULL;
+ int count = 0;
+ int i;
+
+ if (desc != NULL) {
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+ page_count = desc->bd_iov_count;
+ } else {
+ page_count = aa->aa_page_count;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ void *pz;
+ if (desc)
+ pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+ else
+ pz = page_zone(aa->aa_ppga[i]->pg);
+
+ if (likely(pz == zone)) {
+ ++count;
+ continue;
+ }
+
+ if (count > 0) {
+ mod_zone_page_state(zone, NR_UNSTABLE_NFS,
+ factor * count);
+ count = 0;
+ }
+ zone = pz;
+ ++count;
+ }
+ if (count > 0)
+ mod_zone_page_state(zone, NR_UNSTABLE_NFS, factor * count);
+}
- oap->oap_async_flags |= OSC_FLAGS;
- if (oap->oap_cmd & OBD_BRW_READ)
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
- osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa)
+{
+ unstable_page_accounting(desc, aa, 1);
+}
+
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa)
+{
+ unstable_page_accounting(desc, aa, -1);
+}
+
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ *
+ * If this function is called, the request must have been committed, or
+ * req::rq_unstable must have been set; this implies that the unstable
+ * statistics have been added.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ int page_count;
+ long unstable_count;
+
+ if (desc)
+ page_count = desc->bd_iov_count;
+ else
+ page_count = aa->aa_page_count;
- osc_oap_to_pending(oap);
- osc_page_transfer_get(opg, "transfer\0imm");
- osc_page_transfer_add(env, opg, crt);
+ LASSERT(page_count >= 0);
+
+ dec_unstable_page_accounting(desc, aa);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_unstable_count);
+ LASSERT(unstable_count >= 0);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(unstable_count >= 0);
+ if (unstable_count == 0)
+ wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+
+ if (waitqueue_active(&osc_lru_waitq))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+}
+
+/**
+ * "unstable" page accounting. See: osc_dec_unstable_pages.
+ */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ long page_count;
+
+ /* No unstable page tracking */
+ if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
+ return;
+
+ if (desc)
+ page_count = desc->bd_iov_count;
+ else
+ page_count = aa->aa_page_count;
+
+ add_unstable_page_accounting(desc, aa);
+ atomic_long_add(page_count, &cli->cl_unstable_count);
+ atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+ /* If the request has already been committed (i.e. brw_commit
+ * called via rq_commit_cb), we need to undo the unstable page
+	 * increments we just performed because rq_commit_cb won't be
+ * called again. */
+ spin_lock(&req->rq_lock);
+ if (unlikely(req->rq_committed)) {
+ spin_unlock(&req->rq_lock);
+
+ osc_dec_unstable_pages(req);
+ } else {
+ req->rq_unstable = 1;
+ spin_unlock(&req->rq_lock);
+ }
+}
+
+/**
+ * Check if this OSC should piggyback a SOFT_SYNC flag to the OST. This
+ * function is called for every BRW RPC, so it's critical to keep it fast.
+ */
+bool osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long unstable_nr, osc_unstable_count;
+
+ /* Can't check cli->cl_unstable_count, therefore, no soft limit */
+ if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
+ return false;
+
+ osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
+ unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);
+
+ CDEBUG(D_CACHE,
+ "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
+ cli_name(cli), cli, unstable_nr, osc_unstable_count);
+
+	/* If the LRU slots are in shortage - less than 25% remaining - AND
+	 * this OSC has a full RPC window of unstable pages, it's a good
+	 * time to piggyback a SOFT_SYNC flag.
+	 * Note that the OST won't respond immediately to a SOFT_SYNC request,
+	 * so active OSCs will have more chances to carry the flag; this is
+	 * reasonable. */
+ return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
+ osc_unstable_count > cli->cl_max_pages_per_rpc *
+ cli->cl_max_rpcs_in_flight;
+}
+
+/**
+ * Return how many LRU pages are cached across all OSC devices
+ *
+ * \retval # of cached LRU pages times the reclamation tendency
+ * \retval SHRINK_STOP if it cannot do any scanning in this time
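+ *
+ * e.g., with the default sysctl_vfs_cache_pressure of 100, the raw count
+ * of cached LRU pages is reported unchanged.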
+ */
+unsigned long osc_cache_shrink_count(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct client_obd *cli;
+ unsigned long cached = 0;
+
+ spin_lock(&osc_shrink_lock);
+ list_for_each_entry(cli, &osc_shrink_list, cl_shrink_list)
+ cached += atomic_long_read(&cli->cl_lru_in_list);
+ spin_unlock(&osc_shrink_lock);
+
+ return (cached * sysctl_vfs_cache_pressure) / 100;
+}
+
+/**
+ * Scan and try to reclaim sc->nr_to_scan cached LRU pages
+ *
+ * \retval number of cached LRU pages reclaimed
+ * \retval SHRINK_STOP if it cannot do any scanning in this time
+ *
+ * The Linux kernel will loop calling this shrinker scan routine with
+ * sc->nr_to_scan = SHRINK_BATCH (currently 128) until it has reclaimed
+ * enough memory.
+ *
+ * If sc->nr_to_scan is 0, the VM is querying the cache size; we don't need
+ * to scan or reclaim LRU pages, just return 0 and let
+ * osc_cache_shrink_count() report the LRU page count.
+ */
+unsigned long osc_cache_shrink_scan(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct client_obd *cli;
+ struct client_obd *stop_anchor = NULL;
+ struct lu_env *env;
+ long shrank = 0;
+ int rc;
+ __u16 refcheck;
+
+ if (sc->nr_to_scan == 0)
+ return 0;
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return SHRINK_STOP;
+
+ spin_lock(&osc_shrink_lock);
+ while (!list_empty(&osc_shrink_list)) {
+ cli = list_entry(osc_shrink_list.next, struct client_obd,
+ cl_shrink_list);
+
+ if (stop_anchor == NULL)
+ stop_anchor = cli;
+ else if (cli == stop_anchor)
+ break;
+
+ list_move_tail(&cli->cl_shrink_list, &osc_shrink_list);
+ spin_unlock(&osc_shrink_lock);
+
+ /* shrink no more than max_pages_per_rpc for an OSC */
+ rc = osc_lru_shrink(env, cli, (sc->nr_to_scan - shrank) >
+ cli->cl_max_pages_per_rpc ?
+ cli->cl_max_pages_per_rpc :
+ sc->nr_to_scan - shrank, true);
+ if (rc > 0)
+ shrank += rc;
+
+ if (shrank >= sc->nr_to_scan)
+ goto out;
+
+ spin_lock(&osc_shrink_lock);
+ }
+ spin_unlock(&osc_shrink_lock);
+
+out:
+ cl_env_put(env, &refcheck);
+
+ return shrank;
}
/** @} osc */