* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
const struct cl_page_slice *slice;
ENTRY;
- cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
RETURN(slice);
}
struct cl_object *obj = page->cp_obj;
int pagesize = cl_object_header(obj)->coh_page_bufsize;
- PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+ PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, page->cp_req == NULL);
PASSERT(env, page, page->cp_state == CPS_FREEING);
ENTRY;
- while (!cfs_list_empty(&page->cp_layers)) {
+ while (!list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
- slice = cfs_list_entry(page->cp_layers.next,
- struct cl_page_slice, cpl_linkage);
- cfs_list_del_init(page->cp_layers.next);
+ slice = list_entry(page->cp_layers.next,
+ struct cl_page_slice, cpl_linkage);
+ list_del_init(page->cp_layers.next);
if (unlikely(slice->cpl_ops->cpo_fini != NULL))
slice->cpl_ops->cpo_fini(env, slice);
}
ENTRY;
OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
- __GFP_IO);
+ GFP_NOFS);
if (page != NULL) {
int result = 0;
atomic_set(&page->cp_ref, 1);
page->cp_vmpage = vmpage;
cl_page_state_set_trust(page, CPS_CACHED);
page->cp_type = type;
- CFS_INIT_LIST_HEAD(&page->cp_layers);
- CFS_INIT_LIST_HEAD(&page->cp_batch);
- CFS_INIT_LIST_HEAD(&page->cp_flight);
- mutex_init(&page->cp_mutex);
+ INIT_LIST_HEAD(&page->cp_layers);
+ INIT_LIST_HEAD(&page->cp_batch);
+ INIT_LIST_HEAD(&page->cp_flight);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
- cfs_list_for_each_entry(o, &head->loh_layers,
- co_lu.lo_linkage) {
+ list_for_each_entry(o, &head->loh_layers,
+ co_lu.lo_linkage) {
if (o->co_ops->coo_page_init != NULL) {
result = o->co_ops->coo_page_init(env, o, page,
ind);
}
RETURN(page);
}
-EXPORT_SYMBOL(cl_page_alloc);
/**
* Returns a cl_page with index \a idx at the object \a o, and associated with
static inline int cl_page_invariant(const struct cl_page *pg)
{
- /*
- * Page invariant is protected by a VM lock.
- */
- LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
return cl_page_in_use_noref(pg);
}
LASSERT(atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+ PASSERT(env, page, list_empty(&page->cp_batch));
/*
* Page is no longer reachable by other threads. Tear
* it down.
#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
+#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
+({ \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ int __result; \
+ ptrdiff_t __op = (_op); \
+ int (*__method)_proto; \
\
- __result = 0; \
- cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method != NULL) { \
+ __result = 0; \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method != NULL) { \
__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- if (__result > 0) \
- __result = 0; \
- __result; \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
+ if (__result > 0) \
+ __result = 0; \
+ __result; \
})
#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- cfs_list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + __op); \
if (__method != NULL) \
(*__method)(__env, __scan, ## __VA_ARGS__); \
void (*__method)_proto; \
\
/* get to the bottom page. */ \
- cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + __op); \
if (__method != NULL) \
(*__method)(__env, __scan, ## __VA_ARGS__); \
LASSERT(page->cp_owner->ci_owned_nr > 0);
page->cp_owner->ci_owned_nr--;
page->cp_owner = NULL;
- page->cp_task = NULL;
}
EXIT;
}
PASSERT(env, pg, pg->cp_owner == NULL);
PASSERT(env, pg, pg->cp_req == NULL);
pg->cp_owner = cl_io_top(io);
- pg->cp_task = current;
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {
cl_page_state_set(env, pg, CPS_OWNED);
cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
PASSERT(env, pg, pg->cp_owner == NULL);
pg->cp_owner = cl_io_top(io);
- pg->cp_task = current;
cl_page_owner_set(pg);
cl_page_state_set(env, pg, CPS_OWNED);
EXIT;
/**
* Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
+ * pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
* path. Doesn't check page invariant.
*/
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
- if (crt == CRT_READ && ioret == 0) {
- PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
- pg->cp_flags |= CPF_READ_COMPLETED;
- }
-
cl_page_state_set(env, pg, CPS_CACHED);
if (crt >= CRT_NR)
return;
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
if (anchor) {
- LASSERT(cl_page_is_vmlocked(env, pg));
LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
}
*/
cl_page_put(env, pg);
- if (anchor)
- cl_sync_io_note(anchor, ioret);
+ if (anchor != NULL)
+ cl_sync_io_note(env, anchor, ioret);
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(cl_page_completion);
EXPORT_SYMBOL(cl_page_flush);
/**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- int rc;
-
- PINVRNT(env, page, cl_page_invariant(page));
-
- ENTRY;
- rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- PASSERT(env, page, rc != 0);
- RETURN(rc);
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
* Tells transfer engine that only part of a page is to be transmitted.
*
* \see cl_page_operations::cpo_clip()
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p %d %d %d %p %p %#x]\n",
+ "page@%p[%d %p %d %d %d %p %p]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
pg->cp_state, pg->cp_error, pg->cp_type,
- pg->cp_owner, pg->cp_req, pg->cp_flags);
+ pg->cp_owner, pg->cp_req);
}
EXPORT_SYMBOL(cl_page_header_print);
(const struct lu_env *,
const struct cl_page_slice *));
}
-EXPORT_SYMBOL(cl_page_cancel);
/**
* Converts a byte offset within object \a obj into a page index.
}
EXPORT_SYMBOL(cl_index);
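/*
 * Illustrative sketch only, not part of the patch: assuming pages are
 * PAGE_CACHE_SIZE bytes, the byte-offset-to-page-index conversion that
 * cl_index() documents reduces to a right shift (e.g. offset 8192 maps
 * to index 2 with 4KiB pages). The helper name is hypothetical.
 */
static inline pgoff_t example_offset_to_index(loff_t offset)
{
        return (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
}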
-int cl_page_size(const struct cl_object *obj)
+size_t cl_page_size(const struct cl_object *obj)
{
- return 1 << PAGE_CACHE_SHIFT;
+ return 1UL << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
* \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
*/
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
- const struct cl_page_operations *ops)
+ struct cl_object *obj, pgoff_t index,
+ const struct cl_page_operations *ops)
{
- ENTRY;
- cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
- slice->cpl_obj = obj;
- slice->cpl_ops = ops;
- slice->cpl_page = page;
- EXIT;
+ ENTRY;
+ list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+ slice->cpl_obj = obj;
+ slice->cpl_index = index;
+ slice->cpl_ops = ops;
+ slice->cpl_page = page;
+ EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
-int cl_page_init(void)
+/**
+ * Allocate and initialize cl_cache; called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
+{
+ struct cl_client_cache *cache = NULL;
+
+ ENTRY;
+ OBD_ALLOC(cache, sizeof(*cache));
+ if (cache == NULL)
+ RETURN(NULL);
+
+ /* Initialize cache data */
+ atomic_set(&cache->ccc_users, 1);
+ cache->ccc_lru_max = lru_page_max;
+ atomic_long_set(&cache->ccc_lru_left, lru_page_max);
+ spin_lock_init(&cache->ccc_lru_lock);
+ INIT_LIST_HEAD(&cache->ccc_lru);
+
+ /* turn unstable check off by default as it impacts performance */
+ cache->ccc_unstable_check = 0;
+ atomic_long_set(&cache->ccc_unstable_nr, 0);
+ init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+ RETURN(cache);
+}
+EXPORT_SYMBOL(cl_cache_init);
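/*
 * Illustrative sketch only, not part of the patch: a mount-time caller such
 * as ll_init_sbi() would pick an LRU page budget and keep the returned
 * pointer, which already carries one reference. The helper name and the
 * budget heuristic below are assumptions for illustration only.
 */
static struct cl_client_cache *example_client_cache_setup(unsigned long ram_pages)
{
        /* cap the client LRU at half of RAM; NULL means allocation failed */
        return cl_cache_init(ram_pages / 2);
}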
+
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
{
- return 0;
+ atomic_inc(&cache->ccc_users);
}
+EXPORT_SYMBOL(cl_cache_incref);
-void cl_page_fini(void)
+/**
+ * Decrease the cl_cache refcount and free the cache once it drops to zero.
+ * Since llite, lov and osc each hold their own cl_cache reference,
+ * the final free cannot race with other users. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
{
+ if (atomic_dec_and_test(&cache->ccc_users))
+ OBD_FREE(cache, sizeof(*cache));
}
+EXPORT_SYMBOL(cl_cache_decref);
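/*
 * Illustrative sketch only, not part of the patch: the intended reference
 * lifecycle for a shared cl_client_cache. The creator holds one reference
 * from cl_cache_init(); every other layer that keeps a pointer takes its
 * own with cl_cache_incref() and drops it with cl_cache_decref(), and the
 * final put frees the structure (see LU-6173 above). Names other than the
 * cl_cache_* calls are hypothetical.
 */
static void example_share_cache(struct cl_client_cache *cache)
{
        cl_cache_incref(cache);         /* hand a reference to another layer */
        /* ... that layer uses cache->ccc_lru, ccc_lru_left, etc. ... */
        cl_cache_decref(cache);         /* the layer is done; last put frees */
}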