* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
descr->cld_mode = mode;
descr->cld_start = page->cp_index;
descr->cld_end = page->cp_index;
- cfs_spin_lock(&hdr->coh_lock_guard);
+ spin_lock(&hdr->coh_lock_guard);
cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
/*
* Lock-less sub-lock has to be either in HELD state
break;
}
}
- cfs_spin_unlock(&hdr->coh_lock_guard);
+ spin_unlock(&hdr->coh_lock_guard);
}
return result;
}
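/*
 * The conversion above from cfs_spin_lock()/cfs_spin_unlock() to the
 * native spin_lock()/spin_unlock() keeps the critical section unchanged:
 * the per-object lock list (hdr->coh_locks) may only be traversed while
 * hdr->coh_lock_guard is held, since locks can be added to or removed
 * from the list concurrently.
 */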
struct osc_page *opg = cl2osc_page(slice);
CDEBUG(D_TRACE, "%p\n", opg);
LASSERT(opg->ops_lock == NULL);
- OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
* first and then use it as inflight. */
osc_lru_del(osc_cli(obj), opg, false);
- cfs_spin_lock(&obj->oo_seatbelt);
- cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = cfs_current();
- cfs_spin_unlock(&obj->oo_seatbelt);
+ spin_lock(&obj->oo_seatbelt);
+ cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+ opg->ops_submitter = cfs_current();
+ spin_unlock(&obj->oo_seatbelt);
}
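/*
 * Ordering note for the block above: ops_lru and ops_inflight share the
 * same list head (see osc_page_init() below), so the page must be taken
 * off the client LRU by osc_lru_del() before the same linkage is reused
 * on the per-object inflight list; both steps, and the ops_submitter
 * assignment, are serialized by obj->oo_seatbelt.
 */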
static int osc_page_cache_add(const struct lu_env *env,
olock = osc_lock_at(lock);
if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
cfs_atomic_dec(&olock->ols_pageref);
- cl_lock_put(env, lock);
- rc = 1;
+ rc = -ENODATA;
} else {
+ cl_lock_get(lock);
opg->ops_lock = lock;
rc = 0;
}
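/*
 * Reference discipline in the block above: ols_pageref is incremented
 * speculatively; a result <= 0 means the osc_lock is already being torn
 * down, so the increment is rolled back and -ENODATA returned. Only on
 * success does the page pin the cl_lock with its own cl_lock_get(),
 * instead of absorbing the caller's reference as the old code did.
 */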
struct cl_io *unused)
{
struct cl_lock *lock;
- int result;
+ int result = -ENODATA;
ENTRY;
lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
NULL, 1, 0);
- if (lock != NULL &&
- osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
- result = -EBUSY;
- else
- result = -ENODATA;
+ if (lock != NULL) {
+ if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
+ result = -EBUSY;
+ cl_lock_put(env, lock);
+ }
RETURN(result);
}
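/*
 * Note on the rewrite above: cl_lock_at_page() returns a referenced
 * lock, and that reference is now dropped with cl_lock_put() in every
 * outcome; the page's own long-lived reference, if any, is the one
 * osc_page_addref_lock() takes internally via cl_lock_get().
 */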
LASSERT(0);
}
- cfs_spin_lock(&obj->oo_seatbelt);
+ spin_lock(&obj->oo_seatbelt);
if (opg->ops_submitter != NULL) {
LASSERT(!cfs_list_empty(&opg->ops_inflight));
cfs_list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
}
- cfs_spin_unlock(&obj->oo_seatbelt);
+ spin_unlock(&obj->oo_seatbelt);
osc_lru_del(osc_cli(obj), opg, true);
EXIT;
opg->ops_from = from;
opg->ops_to = to;
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
}
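/*
 * oap_async_flags is always updated under oap_lock; ASYNC_COUNT_STABLE
 * records that the [ops_from, ops_to) byte range of this page is final
 * and need not be recomputed by the transfer engine (a reading of the
 * flag name; its definition is outside this patch).
 */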
static int osc_page_cancel(const struct lu_env *env,
.cpo_flush = osc_page_flush
};
-struct cl_page *osc_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct osc_object *osc = cl2osc(obj);
- struct osc_page *opg;
- int result;
+ struct osc_object *osc = cl2osc(obj);
+ struct osc_page *opg = cl_object_page_slice(obj, page);
+ int result;
- OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
- if (opg != NULL) {
- opg->ops_from = 0;
- opg->ops_to = CFS_PAGE_SIZE;
-
- result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
- if (result == 0) {
- struct osc_io *oio = osc_env_io(env);
- opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj,
- &osc_page_ops);
- }
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
+ opg->ops_from = 0;
+ opg->ops_to = CFS_PAGE_SIZE;
+
+ result = osc_prep_async_page(osc, opg, vmpage,
+ cl_offset(obj, page->cp_index));
+ if (result == 0) {
+ struct osc_io *oio = osc_env_io(env);
+ opg->ops_srvlock = osc_io_srvlock(oio);
+ cl_page_slice_add(page, &opg->ops_cl, obj,
+ &osc_page_ops);
+ }
+ /*
+ * Cannot assert osc_page_protected() here as read-ahead
+ * creates temporary pages outside of a lock.
+ */
#ifdef INVARIANT_CHECK
- opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
+ opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
- /* ops_inflight and ops_lru are the same field, but it doesn't
- * hurt to initialize it twice :-) */
- CFS_INIT_LIST_HEAD(&opg->ops_inflight);
- CFS_INIT_LIST_HEAD(&opg->ops_lru);
- } else
- result = -ENOMEM;
+ /* ops_inflight and ops_lru are the same field, but it doesn't
+ * hurt to initialize it twice :-) */
+ CFS_INIT_LIST_HEAD(&opg->ops_inflight);
+ CFS_INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
if (page->cp_type == CPT_CACHEABLE && result == 0)
result = osc_lru_reserve(env, osc, opg);
- return ERR_PTR(result);
+ return result;
}
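/*
 * With this change the osc slice is embedded in the composite cl_page
 * and located with cl_object_page_slice(), so osc_page_init() returns
 * 0 or a negative errno instead of a pointer, and both the slab
 * allocation here and the matching OBD_SLAB_FREE_PTR() in the fini path
 * above disappear. A minimal sketch of the pattern a layer now follows
 * (the example_* names are hypothetical, not part of this patch):
 *
 *	static int example_page_init(const struct lu_env *env,
 *				     struct cl_object *obj,
 *				     struct cl_page *page, cfs_page_t *vmpage)
 *	{
 *		struct example_page *ep = cl_object_page_slice(obj, page);
 *
 *		ep->ep_vmpage = vmpage;
 *		cl_page_slice_add(page, &ep->ep_cl, obj, &example_page_ops);
 *		return 0;
 *	}
 */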
/**
* Return how many LRU pages should be freed. */
static int osc_cache_too_much(struct client_obd *cli)
{
- struct cl_client_lru *lru = cli->cl_lru;
+ struct cl_client_cache *cache = cli->cl_cache;
int pages = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
if (cfs_atomic_read(&osc_lru_waiters) > 0 &&
/* if it's going to run out of LRU slots, we should free some, but not
* too much to maintain fairness among OSCs. */
- if (cfs_atomic_read(cli->cl_lru_left) < lru->ccl_page_max >> 4) {
- unsigned long budget;
+ if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ unsigned long tmp;
- budget = lru->ccl_page_max / cfs_atomic_read(&lru->ccl_users);
- if (pages > budget)
+ tmp = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
+ if (pages > tmp)
return min(pages, lru_shrink_max);
return pages > lru_shrink_min ? lru_shrink_min : 0;
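/*
 * A worked example of the heuristic above (illustrative numbers): with
 * cache->ccc_lru_max = 131072 pages shared by ccc_users = 8 clients,
 * each client's budget is 16384 pages. Once the shared cl_lru_left
 * drops below ccc_lru_max >> 4 (8192 pages), a client whose shrinkable
 * count ("pages", half of cl_lru_in_list) exceeds its budget frees up
 * to lru_shrink_max pages; otherwise it frees at most lru_shrink_min,
 * which keeps shrinking fair across OSCs.
 */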
* This check is necessary to avoid freeing pages that
* have already been removed from the LRU and pinned
* for IO. */
- if (cfs_atomic_read(&page->cp_ref) == 1) {
+ if (!cl_page_in_use(page)) {
cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
++count;
opg = cfs_list_entry(cli->cl_lru_list.next, struct osc_page,
ops_lru);
page = cl_page_top(opg->ops_cl.cpl_page);
- if (page->cp_state == CPS_FREEING ||
- cfs_atomic_read(&page->cp_ref) > 0) {
+ if (cl_page_in_use_noref(page)) {
cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
}
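/*
 * The scan above replaces the open-coded CPS_FREEING/cp_ref test with
 * cl_page_in_use_noref(); pages still referenced by IO are rotated to
 * the tail of cl_lru_list rather than freed, so the loop keeps making
 * progress without touching busy pages.
 */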
static int osc_lru_reclaim(struct client_obd *cli)
{
- struct cl_client_lru *lru = cli->cl_lru;
+ struct cl_client_cache *cache = cli->cl_cache;
struct client_obd *victim;
struct client_obd *tmp;
int rc;
- LASSERT(lru != NULL);
- LASSERT(!cfs_list_empty(&lru->ccl_list));
+ LASSERT(cache != NULL);
+ LASSERT(!cfs_list_empty(&cache->ccc_lru));
rc = osc_lru_shrink(cli, lru_shrink_min);
if (rc > 0) {
/* Reclaim LRU slots from other client_obds, as this one can't free
* enough from its own. This should rarely happen. */
- cfs_spin_lock(&lru->ccl_lock);
- lru->ccl_reclaim_count++;
- cfs_list_move_tail(&cli->cl_lru_osc, &lru->ccl_list);
- cfs_list_for_each_entry_safe(victim, tmp, &lru->ccl_list, cl_lru_osc) {
+ spin_lock(&cache->ccc_lru_lock);
+ cache->ccc_lru_shrinkers++;
+ cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+ cfs_list_for_each_entry_safe(victim, tmp, &cache->ccc_lru, cl_lru_osc) {
if (victim == cli)
break;
cfs_atomic_read(&victim->cl_lru_in_list),
cfs_atomic_read(&victim->cl_lru_busy));
- cfs_list_move_tail(&victim->cl_lru_osc, &lru->ccl_list);
+ cfs_list_move_tail(&victim->cl_lru_osc, &cache->ccc_lru);
if (cfs_atomic_read(&victim->cl_lru_in_list) > 0)
break;
}
- cfs_spin_unlock(&lru->ccl_lock);
+ spin_unlock(&cache->ccc_lru_lock);
if (victim == cli) {
CDEBUG(D_CACHE, "%s: can't get any free LRU slots.\n",
cli->cl_import->imp_obd->obd_name);
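/*
 * Shape of the reclaim walk above: the caller first moves itself to the
 * tail of cache->ccc_lru, then visits the other clients in list order,
 * rotating each one to the tail as it goes; the walk ends either at a
 * victim with cl_lru_in_list > 0 or back at the caller itself, in which
 * case no free LRU slots could be reclaimed anywhere.
 */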
int rc = 0;
ENTRY;
- if (cli->cl_lru == NULL) /* shall not be in LRU */
+ if (cli->cl_cache == NULL) /* shall not be in LRU */
RETURN(0);
LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);