*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_OSC
+#include <lustre_osc.h>
-#include "osc_cl_internal.h"
+#include "osc_internal.h"
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
-static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *opg);
+static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_page *opg);
/** \addtogroup osc
* @{
*/
/*
- * Comment out osc_page_protected because it may sleep inside the
- * the client_obd_list_lock.
- * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
- * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
- * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
- */
-#if 0
-static int osc_page_is_dlocked(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int pending, int unref)
-{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- union ldlm_policy_data *policy;
- enum ldlm_mode dlmmode;
- __u64 flags;
-
- might_sleep();
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
-
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
-
- dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
- osc_lock_build_res(env, obj, resname);
- osc_index2policy(policy, page->cp_obj, osc_index(opg), osc_index(opg));
- return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- dlmmode, &flags, NULL, lockh, unref);
-}
-
-/**
- * Checks an invariant that a page in the cache is covered by a lock, as
- * needed.
- */
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- struct cl_object_header *hdr;
- struct cl_lock *scan;
- struct cl_page *page;
- struct cl_lock_descr *descr;
- int result;
-
- LINVRNT(!opg->ops_temp);
-
- page = opg->ops_cl.cpl_page;
- if (page->cp_owner != NULL &&
- cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
- /*
- * If IO is done without locks (liblustre, or lloop), lock is
- * not required.
- */
- result = 1;
- else
- /* otherwise check for a DLM lock */
- result = osc_page_is_dlocked(env, opg, mode, 1, unref);
- if (result == 0) {
- /* maybe this page is a part of a lockless io? */
- hdr = cl_object_header(opg->ops_cl.cpl_obj);
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_mode = mode;
- descr->cld_start = osc_index(opg);
- descr->cld_end = osc_index(opg);
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- /*
- * Lock-less sub-lock has to be either in HELD state
- * (when io is actively going on), or in CACHED state,
- * when top-lock is being unlocked:
- * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
- */
- if ((scan->cll_state == CLS_HELD ||
- scan->cll_state == CLS_CACHED) &&
- cl_lock_ext_match(&scan->cll_descr, descr)) {
- struct osc_lock *olck;
-
- olck = osc_lock_at(scan);
- result = osc_lock_is_lockless(olck);
- break;
- }
- }
- spin_unlock(&hdr->coh_lock_guard);
- }
- return result;
-}
-#else
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- return 1;
-}
-#endif
-
-/*****************************************************************************
- *
* Page operations.
- *
*/
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
int result;
ENTRY;
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
-
osc_page_transfer_get(opg, "transfer\0cache");
result = osc_queue_async_io(env, io, opg);
if (result != 0)
return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
"1< %#x %d %u %s %s > "
- "2< "LPD64" %u %u %#x %#x | %p %p %p > "
+ "2< %lld %u %u %#x %#x | %p %p %p > "
"3< %d %lu %d > "
"4< %d %d %d %lu %s | %s %s %s %s > "
"5< %s %s %s %s | %d %s | %d %s %s>\n",
}
static void osc_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
struct osc_page *opg = cl2osc_page(slice);
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- int rc;
+ int rc;
- LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
-
- ENTRY;
- CDEBUG(D_TRACE, "%p\n", opg);
- osc_page_transfer_put(env, opg);
+ ENTRY;
+ CDEBUG(D_TRACE, "%p\n", opg);
+ osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
if (rc) {
CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
const struct cl_page_slice *slice,
int from, int to)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_async_page *oap = &opg->ops_oap;
- opg->ops_from = from;
- opg->ops_to = to;
+ opg->ops_from = from;
+ opg->ops_to = to;
spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&oap->oap_lock);
}
static int osc_page_cancel(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
struct osc_page *opg = cl2osc_page(slice);
- int rc = 0;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+ int rc = 0;
- /* Check if the transferring against this page
- * is completed, or not even queued. */
- if (opg->ops_transfer_pinned)
- /* FIXME: may not be interrupted.. */
+	/* Check if the transfer against this page
+	 * has completed, or was never even queued. */
+ if (opg->ops_transfer_pinned)
+ /* FIXME: may not be interrupted.. */
rc = osc_cancel_async_page(env, opg);
- LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
- return rc;
+ LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
+ return rc;
}
static int osc_page_flush(const struct lu_env *env,
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
+ struct osc_io *oio = osc_env_io(env);
int result;
opg->ops_from = 0;
- opg->ops_to = PAGE_CACHE_SIZE;
+ opg->ops_to = PAGE_SIZE;
+
+ INIT_LIST_HEAD(&opg->ops_lru);
result = osc_prep_async_page(osc, opg, page->cp_vmpage,
cl_offset(obj, index));
- if (result == 0) {
- struct osc_io *oio = osc_env_io(env);
- opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj, index,
- &osc_page_ops);
- }
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
-#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
- opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
-#endif
- INIT_LIST_HEAD(&opg->ops_lru);
+ if (result != 0)
+ return result;
+
+ opg->ops_srvlock = osc_io_srvlock(oio);
+ cl_page_slice_add(page, &opg->ops_cl, obj, index,
+ &osc_page_ops);
+
/* reserve an LRU space for this page */
- if (page->cp_type == CPT_CACHEABLE && result == 0) {
- result = osc_lru_reserve(env, osc, opg);
+ if (page->cp_type == CPT_CACHEABLE) {
+ result = osc_lru_alloc(env, osc_cli(osc), opg);
if (result == 0) {
- spin_lock(&osc->oo_tree_lock);
- result = radix_tree_insert(&osc->oo_tree, index, opg);
- if (result == 0)
- ++osc->oo_npages;
- spin_unlock(&osc->oo_tree_lock);
- LASSERT(result == 0);
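+			/*
+			 * Preallocate radix tree nodes (GFP_NOFS) before
+			 * taking oo_tree_lock so that the insertion below
+			 * does not need to allocate memory under the
+			 * spinlock.
+			 */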
+ result = radix_tree_preload(GFP_NOFS);
+ if (result == 0) {
+ spin_lock(&osc->oo_tree_lock);
+ result = radix_tree_insert(&osc->oo_tree,
+ index, opg);
+ if (result == 0)
+ ++osc->oo_npages;
+ spin_unlock(&osc->oo_tree_lock);
+
+ radix_tree_preload_end();
+ }
}
}
enum cl_req_type crt, int brw_flags)
{
struct osc_async_page *oap = &opg->ops_oap;
- struct osc_object *obj = oap->oap_obj;
-
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
"magic 0x%x\n", oap, oap->oap_magic);
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
- if (!client_is_remote(osc_export(obj)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ if (cfs_capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
else if (pages >= budget / 2)
return lru_shrink_min(cli);
} else {
- int duration = cfs_time_current_sec() - cli->cl_lru_last_used;
+ time64_t duration = ktime_get_real_seconds();
+ long timediff;
/* knock out pages by duration of no IO activity */
- duration >>= 6; /* approximately 1 minute */
- if (duration > 0 && pages >= budget / duration)
+ duration -= cli->cl_lru_last_used;
+ /*
+	 * The difference shouldn't be more than 70 years,
+	 * so we can safely cast to a long. The shift below
+	 * rounds to approximately 1-minute units.
+ */
+ timediff = (long)(duration >> 6);
+ if (timediff > 0 && pages >= budget / timediff)
return lru_shrink_min(cli);
}
return 0;
list_splice_tail(&lru, &cli->cl_lru_list);
atomic_long_sub(npages, &cli->cl_lru_busy);
atomic_long_add(npages, &cli->cl_lru_in_list);
- cli->cl_lru_last_used = cfs_time_current_sec();
+ cli->cl_lru_last_used = ktime_get_real_seconds();
spin_unlock(&cli->cl_lru_list_lock);
if (waitqueue_active(&osc_lru_waitq))
}
/**
- * Delete page from LRUlist for redirty.
+ * Delete page from LRU list for redirty.
*/
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
{
/* If page is being transferred for the first time,
* ops_lru should be empty */
- if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
+ if (opg->ops_in_lru) {
spin_lock(&cli->cl_lru_list_lock);
- __osc_lru_del(cli, opg);
+ if (!list_empty(&opg->ops_lru)) {
+ __osc_lru_del(cli, opg);
+ atomic_long_inc(&cli->cl_lru_busy);
+ }
spin_unlock(&cli->cl_lru_list_lock);
- atomic_long_inc(&cli->cl_lru_busy);
}
}
* LRU pages in batch. Therefore, the actual number is adjusted at least
* max_pages_per_rpc.
*/
-long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
+static long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
+ __u16 refcheck;
long rc = 0;
ENTRY;
LASSERT(cache != NULL);
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(rc);
spin_unlock(&cache->ccc_lru_lock);
out:
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
cli_name(cli), cli, rc);
return rc;
}
/**
- * osc_lru_reserve() is called to reserve an LRU slot for a cl_page.
+ * osc_lru_alloc() is called to allocate an LRU slot for a cl_page.
*
* Usually the LRU slots are reserved in osc_io_iter_rw_init().
* Only in the case that the LRU slots are in extreme shortage, it should
* have reserved enough slots for an IO.
*/
-static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *opg)
+static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_page *opg)
{
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct osc_io *oio = osc_env_io(env);
- struct client_obd *cli = osc_cli(obj);
int rc = 0;
ENTRY;
}
/**
+ * osc_lru_reserve() is called to reserve enough LRU slots for I/O.
+ *
+ * The benefit of doing this is to reduce contention on the atomic counter
+ * cl_lru_left by changing it from per-page access to per-IO access.
+ */
+unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages)
+{
+ unsigned long reserved = 0;
+ unsigned long max_pages;
+ unsigned long c;
+
+	/* reserve at most a full RPC window to keep a single thread from
+	 * accidentally consuming too many LRU slots */
+ max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+ if (npages > max_pages)
+ npages = max_pages;
+
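+	/*
+	 * Lock-free reservation: re-read cl_lru_left and try to grab npages
+	 * slots with cmpxchg, retrying while other threads race on the
+	 * counter; give up (reserved stays 0) once fewer than npages slots
+	 * are left.
+	 */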
+ c = atomic_long_read(cli->cl_lru_left);
+ if (c < npages && osc_lru_reclaim(cli, npages) > 0)
+ c = atomic_long_read(cli->cl_lru_left);
+ while (c >= npages) {
+ if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ reserved = npages;
+ break;
+ }
+ c = atomic_long_read(cli->cl_lru_left);
+ }
+ if (atomic_long_read(cli->cl_lru_left) < max_pages) {
+ /* If there aren't enough pages in the per-OSC LRU then
+ * wake up the LRU thread to try and clear out space, so
+ * we don't block if pages are being dirtied quickly. */
+ CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
+ cli_name(cli), atomic_long_read(cli->cl_lru_left),
+ max_pages);
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+
+ return reserved;
+}
+
+/**
+ * osc_lru_unreserve() is called to unreserve LRU slots.
+ *
+ * LRU slots reserved by osc_lru_reserve() may be left unused for several
+ * reasons, such as the page already existing in the cache or an I/O error.
+ * Such leftover slots should be freed by calling this function.
+ */
+void osc_lru_unreserve(struct client_obd *cli, unsigned long npages)
+{
+ atomic_long_add(npages, cli->cl_lru_left);
+ wake_up_all(&osc_lru_waitq);
+}
+
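+/*
+ * Illustrative usage sketch (not part of this change): per the comment on
+ * osc_lru_reserve() above, the reservation is normally taken per-IO, e.g.
+ * from osc_io_iter_rw_init(), and any unused slots are returned afterwards:
+ *
+ *	reserved = osc_lru_reserve(cli, npages);
+ *	... queue up to 'reserved' pages for this IO ...
+ *	if (unused > 0)
+ *		osc_lru_unreserve(cli, unused);
+ */
+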
+/**
* Atomic operations are expensive. We accumulate the accounting for the
* same page zone to get better performance.
* In practice this can work pretty good because the pages in the same RPC
* are likely from the same page zone.
*/
static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa,
int factor)
{
- int page_count = desc->bd_iov_count;
+ int page_count;
void *zone = NULL;
int count = 0;
int i;
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
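+	/*
+	 * Pages may be described either by the bulk descriptor or, for
+	 * requests carrying no bulk descriptor, by the page array stored in
+	 * the brw async args.
+	 */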
+ if (desc != NULL) {
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+ page_count = desc->bd_iov_count;
+ } else {
+ page_count = aa->aa_page_count;
+ }
for (i = 0; i < page_count; i++) {
- void *pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+ void *pz;
+ if (desc)
+ pz = page_zone(BD_GET_KIOV(desc, i).kiov_page);
+ else
+ pz = page_zone(aa->aa_ppga[i]->pg);
if (likely(pz == zone)) {
++count;
mod_zone_page_state(zone, NR_UNSTABLE_NFS, factor * count);
}
-static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa)
{
- unstable_page_accounting(desc, 1);
+ unstable_page_accounting(desc, aa, 1);
}
-static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ struct osc_brw_async_args *aa)
{
- unstable_page_accounting(desc, -1);
+ unstable_page_accounting(desc, aa, -1);
}
/**
void osc_dec_unstable_pages(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- int page_count = desc->bd_iov_count;
+ int page_count;
long unstable_count;
+ if (desc)
+ page_count = desc->bd_iov_count;
+ else
+ page_count = aa->aa_page_count;
+
LASSERT(page_count >= 0);
- dec_unstable_page_accounting(desc);
+
+ dec_unstable_page_accounting(desc, aa);
unstable_count = atomic_long_sub_return(page_count,
&cli->cl_unstable_count);
void osc_inc_unstable_pages(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- long page_count = desc->bd_iov_count;
+ long page_count;
/* No unstable page tracking */
if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
return;
- add_unstable_page_accounting(desc);
+ if (desc)
+ page_count = desc->bd_iov_count;
+ else
+ page_count = aa->aa_page_count;
+
+ add_unstable_page_accounting(desc, aa);
atomic_long_add(page_count, &cli->cl_unstable_count);
atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
cli->cl_max_rpcs_in_flight;
}
+/**
+ * Return how many LRU pages in the cache of all OSC devices
+ *
+ * \retval number of cached LRU pages times the reclamation tendency
+ * \retval SHRINK_STOP if no scanning can be done at this time
+ */
+unsigned long osc_cache_shrink_count(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct client_obd *cli;
+ unsigned long cached = 0;
+
+ spin_lock(&osc_shrink_lock);
+ list_for_each_entry(cli, &osc_shrink_list, cl_shrink_list)
+ cached += atomic_long_read(&cli->cl_lru_in_list);
+ spin_unlock(&osc_shrink_lock);
+
+ return (cached * sysctl_vfs_cache_pressure) / 100;
+}
+
+/**
+ * Scan and try to reclaim sc->nr_to_scan cached LRU pages
+ *
+ * \retval number of cached LRU pages reclaimed
+ * \retval SHRINK_STOP if no scanning can be done at this time
+ *
+ * The Linux kernel keeps calling this shrinker scan routine with
+ * sc->nr_to_scan = SHRINK_BATCH (currently 128) until it has reclaimed
+ * enough memory.
+ *
+ * If sc->nr_to_scan is 0, the VM is only querying the cache size; there is
+ * no need to scan or reclaim LRU pages, just return 0 and let
+ * osc_cache_shrink_count() report the number of LRU pages.
+ */
+unsigned long osc_cache_shrink_scan(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct client_obd *cli;
+ struct client_obd *stop_anchor = NULL;
+ struct lu_env *env;
+ long shrank = 0;
+ int rc;
+ __u16 refcheck;
+
+ if (sc->nr_to_scan == 0)
+ return 0;
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return SHRINK_STOP;
+
+ spin_lock(&osc_shrink_lock);
+ while (!list_empty(&osc_shrink_list)) {
+ cli = list_entry(osc_shrink_list.next, struct client_obd,
+ cl_shrink_list);
+
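+		/*
+		 * Clients are rotated to the tail of osc_shrink_list below,
+		 * so remember the first one visited; meeting it again means
+		 * a full pass was made and the loop must stop.
+		 */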
+ if (stop_anchor == NULL)
+ stop_anchor = cli;
+ else if (cli == stop_anchor)
+ break;
+
+ list_move_tail(&cli->cl_shrink_list, &osc_shrink_list);
+ spin_unlock(&osc_shrink_lock);
+
+ /* shrink no more than max_pages_per_rpc for an OSC */
+ rc = osc_lru_shrink(env, cli, (sc->nr_to_scan - shrank) >
+ cli->cl_max_pages_per_rpc ?
+ cli->cl_max_pages_per_rpc :
+ sc->nr_to_scan - shrank, true);
+ if (rc > 0)
+ shrank += rc;
+
+ if (shrank >= sc->nr_to_scan)
+ goto out;
+
+ spin_lock(&osc_shrink_lock);
+ }
+ spin_unlock(&osc_shrink_lock);
+
+out:
+ cl_env_put(env, &refcheck);
+
+ return shrank;
+}
+
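+/*
+ * Sketch of how these callbacks are expected to be wired up (the actual
+ * shrinker registration lives outside this file and may use compat
+ * wrappers on older kernels):
+ *
+ *	static struct shrinker osc_cache_shrinker = {
+ *		.count_objects	= osc_cache_shrink_count,
+ *		.scan_objects	= osc_cache_shrink_scan,
+ *		.seeks		= DEFAULT_SEEKS,
+ *	};
+ *	register_shrinker(&osc_cache_shrinker);
+ */
+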
/** @} osc */