LU-5710 all: second batch of corrected typos and grammar errors
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 96c5295..03ad075 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -123,7 +123,7 @@ cl_page_at_trusted(const struct cl_page *page,
        const struct cl_page_slice *slice;
        ENTRY;
 
-       cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+       list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        RETURN(slice);
        }
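
The cfs_list_* calls dropped throughout this patch were libcfs portability
wrappers; on Linux they map one-to-one onto the native <linux/list.h>
primitives (list_for_each_entry(), list_empty(), list_del_init(),
INIT_LIST_HEAD()), so these hunks are mechanical renames with no change in
behaviour.
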
@@ -135,18 +135,18 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
        struct cl_object *obj  = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;
 
-       PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+       PASSERT(env, page, list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);
 
        ENTRY;
-       while (!cfs_list_empty(&page->cp_layers)) {
+       while (!list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;
 
-               slice = cfs_list_entry(page->cp_layers.next,
-                                      struct cl_page_slice, cpl_linkage);
-               cfs_list_del_init(page->cp_layers.next);
+               slice = list_entry(page->cp_layers.next,
+                                  struct cl_page_slice, cpl_linkage);
+               list_del_init(page->cp_layers.next);
                if (unlikely(slice->cpl_ops->cpo_fini != NULL))
                        slice->cpl_ops->cpo_fini(env, slice);
        }
@@ -190,13 +190,13 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
                page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
-               CFS_INIT_LIST_HEAD(&page->cp_layers);
-               CFS_INIT_LIST_HEAD(&page->cp_batch);
-               CFS_INIT_LIST_HEAD(&page->cp_flight);
+               INIT_LIST_HEAD(&page->cp_layers);
+               INIT_LIST_HEAD(&page->cp_batch);
+               INIT_LIST_HEAD(&page->cp_flight);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
-               cfs_list_for_each_entry(o, &head->loh_layers,
-                                       co_lu.lo_linkage) {
+               list_for_each_entry(o, &head->loh_layers,
+                                   co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o, page,
                                                                  ind);
@@ -218,7 +218,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
        }
        RETURN(page);
 }
-EXPORT_SYMBOL(cl_page_alloc);
 
 /**
  * Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -278,11 +277,6 @@ EXPORT_SYMBOL(cl_page_find);
 
 static inline int cl_page_invariant(const struct cl_page *pg)
 {
-       /*
-        * Page invariant is protected by a VM lock.
-        */
-       LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
        return cl_page_in_use_noref(pg);
 }
 
@@ -388,7 +382,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
 
                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
-               PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+               PASSERT(env, page, list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
@@ -434,41 +428,17 @@ EXPORT_SYMBOL(cl_page_at);
 
 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
 
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
-({                                                                      \
-       const struct lu_env        *__env  = (_env);                    \
-       struct cl_page             *__page = (_page);                   \
-       const struct cl_page_slice *__scan;                             \
-       int                         __result;                           \
-       ptrdiff_t                   __op   = (_op);                     \
-       int                       (*__method)_proto;                    \
-                                                                       \
-       __result = 0;                                                   \
-       cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {     \
-               __method = *(void **)((char *)__scan->cpl_ops +  __op);        \
-               if (__method != NULL) {                                        \
-                       __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
-                       if (__result != 0)                              \
-                               break;                                  \
-               }                                                       \
-       }                                                               \
-       if (__result > 0)                                               \
-               __result = 0;                                           \
-       __result;                                                       \
-})
-
-#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)          \
+#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                  \
 ({                                                                     \
-       const struct lu_env        *__env  = (_env);                    \
-       struct cl_page             *__page = (_page);                   \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
-       int                         __result;                           \
-       ptrdiff_t                   __op   = (_op);                     \
-       int                       (*__method)_proto;                    \
+       int                         __result;                           \
+       ptrdiff_t                   __op   = (_op);                     \
+       int                        (*__method)_proto;                   \
                                                                        \
        __result = 0;                                                   \
-       list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
-                                       cpl_linkage) {                  \
+       list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
                __method = *(void **)((char *)__scan->cpl_ops +  __op); \
                if (__method != NULL) {                                 \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
@@ -489,8 +459,7 @@ do {                                                                        \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
-       cfs_list_for_each_entry(__scan, &__page->cp_layers,             \
-                               cpl_linkage) {                          \
+       list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
                __method = *(void **)((char *)__scan->cpl_ops +  __op); \
                if (__method != NULL)                                   \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
@@ -506,8 +475,8 @@ do {                                                                        \
        void                      (*__method)_proto;                    \
                                                                        \
        /* get to the bottom page. */                                   \
-       cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers,     \
-                                       cpl_linkage) {                  \
+       list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
+                                   cpl_linkage) {                      \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method != NULL)                                   \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
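
All of these macros locate the per-layer method by raw byte offset into the
cl_page_operations table (CL_PAGE_OP() is just offsetof()), which is what
lets a single iteration macro serve every hook. A minimal standalone sketch
of that idiom, using toy stand-in types rather than the real Lustre
structures:

        #include <stddef.h>
        #include <stdio.h>

        struct toy_slice;

        struct toy_ops {
                int (*op_prep)(struct toy_slice *slice);
                int (*op_done)(struct toy_slice *slice);
        };

        struct toy_slice {
                const struct toy_ops *ops;
                const char           *name;
        };

        static int prep_one(struct toy_slice *slice)
        {
                printf("prep on layer %s\n", slice->name);
                return 0;       /* 0 == keep iterating, as in CL_PAGE_INVOKE */
        }

        /* Fetch the method by its byte offset into the ops table, exactly
         * as CL_PAGE_INVOKE does with CL_PAGE_OP(opname). */
        static int invoke(struct toy_slice *slices, int nr, ptrdiff_t op)
        {
                int rc = 0;

                for (int i = 0; i < nr; i++) {
                        int (*method)(struct toy_slice *) =
                                *(void **)((char *)slices[i].ops + op);

                        if (method != NULL) {
                                rc = method(&slices[i]);
                                if (rc != 0)
                                        break;
                        }
                }
                /* a positive return means "stop, but report success" */
                return rc > 0 ? 0 : rc;
        }

        int main(void)
        {
                static const struct toy_ops ops = { .op_prep = prep_one };
                struct toy_slice slices[] = {
                        { &ops, "vvp" }, { &ops, "lov" }, { &ops, "osc" },
                };

                /* op_done is left NULL and is skipped, like an optional
                 * cl_page_operations hook. */
                return invoke(slices, 3, offsetof(struct toy_ops, op_prep));
        }
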
@@ -775,7 +744,7 @@ EXPORT_SYMBOL(cl_page_discard);
 
 /**
  * Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
+ * pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
  * path. Doesn't check page invariant.
  */
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
@@ -959,7 +928,6 @@ void cl_page_completion(const struct lu_env *env,
                                (const struct lu_env *,
                                 const struct cl_page_slice *, int), ioret);
         if (anchor) {
-                LASSERT(cl_page_is_vmlocked(env, pg));
                 LASSERT(pg->cp_sync_io == anchor);
                 pg->cp_sync_io = NULL;
        }
@@ -970,10 +938,10 @@ void cl_page_completion(const struct lu_env *env,
         */
        cl_page_put(env, pg);
 
-       if (anchor)
-                cl_sync_io_note(anchor, ioret);
+       if (anchor != NULL)
+               cl_sync_io_note(env, anchor, ioret);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_completion);
 
@@ -1034,30 +1002,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
 EXPORT_SYMBOL(cl_page_flush);
 
 /**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
-                          struct cl_page *page, pgoff_t *max_index)
-{
-       int rc;
-
-       PINVRNT(env, page, cl_page_invariant(page));
-
-       ENTRY;
-       rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
-                                   (const struct lu_env *,
-                                    const struct cl_page_slice *,
-                                    struct cl_io *, pgoff_t *),
-                                   io, max_index);
-       RETURN(rc);
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
  * Tells transfer engine that only part of a page is to be transmitted.
  *
  * \see cl_page_operations::cpo_clip()
@@ -1113,7 +1057,6 @@ int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
                               (const struct lu_env *,
                                const struct cl_page_slice *));
 }
-EXPORT_SYMBOL(cl_page_cancel);
 
 /**
  * Converts a byte offset within object \a obj into a page index.
@@ -1133,9 +1076,9 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 }
 EXPORT_SYMBOL(cl_index);
 
-int cl_page_size(const struct cl_object *obj)
+size_t cl_page_size(const struct cl_object *obj)
 {
-       return 1 << PAGE_CACHE_SHIFT;
+       return 1UL << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
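
The cl_page_size() change above is about type hygiene rather than behaviour:
the result fits easily in an int for any realistic PAGE_CACHE_SHIFT, but
returning size_t and shifting 1UL keeps the expression unsigned and
word-sized, so callers can fold it into size_t/loff_t arithmetic without
implicit sign conversions.
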
 
@@ -1162,11 +1105,51 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
 }
 EXPORT_SYMBOL(cl_page_slice_add);
 
-int  cl_page_init(void)
+/**
+ * Allocate and initialize cl_cache; called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
 {
-        return 0;
+       struct cl_client_cache  *cache = NULL;
+
+       ENTRY;
+       OBD_ALLOC(cache, sizeof(*cache));
+       if (cache == NULL)
+               RETURN(NULL);
+
+       /* Initialize cache data */
+       atomic_set(&cache->ccc_users, 1);
+       cache->ccc_lru_max = lru_page_max;
+       atomic_long_set(&cache->ccc_lru_left, lru_page_max);
+       spin_lock_init(&cache->ccc_lru_lock);
+       INIT_LIST_HEAD(&cache->ccc_lru);
+
+       /* turn unstable check off by default as it impacts performance */
+       cache->ccc_unstable_check = 0;
+       atomic_long_set(&cache->ccc_unstable_nr, 0);
+       init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+       RETURN(cache);
+}
+EXPORT_SYMBOL(cl_cache_init);
+
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
+{
+       atomic_inc(&cache->ccc_users);
 }
+EXPORT_SYMBOL(cl_cache_incref);
 
-void cl_page_fini(void)
+/**
+ * Decrease cl_cache refcount and free the cache if the refcount drops to 0.
+ * Since llite, lov and osc all hold a cl_cache refcount,
+ * the free cannot race with any user. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
 {
+       if (atomic_dec_and_test(&cache->ccc_users))
+               OBD_FREE(cache, sizeof(*cache));
 }
+EXPORT_SYMBOL(cl_cache_decref);
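
For context, a sketch of how the three new cl_cache entry points fit
together on the client. cl_cache_init() is called from ll_init_sbi() per
its docstring; the incref/decref call sites and the lru_pages variable
below are illustrative assumptions, not code from this patch:

        struct cl_client_cache *cache;

        cache = cl_cache_init(lru_pages);       /* ccc_users starts at 1 */
        if (cache == NULL)
                return -ENOMEM;

        /* Hypothetical: each layer that stores the pointer (e.g. lov,
         * osc) takes its own reference... */
        cl_cache_incref(cache);

        /* ...and drops it during its own teardown.  Whichever layer
         * releases the last reference frees the structure, which is how
         * the LU-6173 use-after-free is avoided. */
        cl_cache_decref(cache);                 /* e.g. osc cleanup */
        cl_cache_decref(cache);                 /* llite's own reference */
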