LU-5710 all: second batch of corrected typos and grammar errors
[fs/lustre-release.git] lustre/obdclass/cl_page.c
index 6e70d46..03ad075 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -218,7 +218,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
        }
        RETURN(page);
 }
-EXPORT_SYMBOL(cl_page_alloc);
 
 /**
  * Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -278,11 +277,6 @@ EXPORT_SYMBOL(cl_page_find);
 
 static inline int cl_page_invariant(const struct cl_page *pg)
 {
-       /*
-        * Page invariant is protected by a VM lock.
-        */
-       LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
        return cl_page_in_use_noref(pg);
 }
 
@@ -457,30 +451,6 @@ EXPORT_SYMBOL(cl_page_at);
        __result;                                                       \
 })
 
-#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)          \
-({                                                                     \
-       const struct lu_env        *__env  = (_env);                    \
-       struct cl_page             *__page = (_page);                   \
-       const struct cl_page_slice *__scan;                             \
-       int                         __result;                           \
-       ptrdiff_t                   __op   = (_op);                     \
-       int                       (*__method)_proto;                    \
-                                                                       \
-       __result = 0;                                                   \
-       list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
-                                   cpl_linkage) {                      \
-               __method = *(void **)((char *)__scan->cpl_ops +  __op); \
-               if (__method != NULL) {                                 \
-                       __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
-                       if (__result != 0)                              \
-                               break;                                  \
-               }                                                       \
-       }                                                               \
-       if (__result > 0)                                               \
-               __result = 0;                                           \
-       __result;                                                       \
-})
-
 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                  \
 do {                                                                   \
        const struct lu_env        *__env  = (_env);                    \
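The macro removed above is the reverse-order counterpart of CL_PAGE_INVOKE: it walks the page's cp_layers list bottom-up and dispatches to the per-layer method found at byte offset _op inside the layer's ops table, stopping at the first non-zero return. For readers unfamiliar with that offsetof-based dispatch, here is a minimal, self-contained sketch of the same idea; the types, layer names, and method slot are invented for illustration and are not Lustre's.

/* Hypothetical, self-contained sketch (plain C, not Lustre's real types) of
 * the dispatch the removed CL_PAGE_INVOKE_REVERSE macro performed: visit the
 * layer slices bottom-up, fetch a method by its byte offset inside the
 * per-layer ops table, and stop at the first non-zero return. */
#include <stddef.h>
#include <stdio.h>

struct slice;
struct slice_ops {
	int (*spo_check)(struct slice *s);	/* sample method slot */
};
struct slice {
	struct slice_ops *ops;
	const char       *name;
};

static int invoke_reverse(struct slice **layers, int nr, ptrdiff_t op)
{
	int result = 0;
	int i;

	for (i = nr - 1; i >= 0; i--) {
		/* same trick as the macro: read the function pointer that
		 * lives 'op' bytes into this layer's ops table */
		int (*method)(struct slice *) =
			*(int (**)(struct slice *))((char *)layers[i]->ops + op);

		if (method != NULL) {
			result = method(layers[i]);
			if (result != 0)
				break;
		}
	}
	/* as in the macro, a positive return means "handled", i.e. success */
	return result > 0 ? 0 : result;
}

static int say_checked(struct slice *s)
{
	printf("checked layer %s\n", s->name);
	return 0;			/* 0: keep walking the layer stack */
}

int main(void)
{
	struct slice_ops ops = { .spo_check = say_checked };
	struct slice top = { &ops, "vvp" }, bottom = { &ops, "osc" };
	struct slice *layers[] = { &top, &bottom };

	return invoke_reverse(layers, 2, offsetof(struct slice_ops, spo_check));
}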
@@ -774,7 +744,7 @@ EXPORT_SYMBOL(cl_page_discard);
 
 /**
  * Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
+ * pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
  * path. Doesn't check page invariant.
  */
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
@@ -958,7 +928,6 @@ void cl_page_completion(const struct lu_env *env,
                                (const struct lu_env *,
                                 const struct cl_page_slice *, int), ioret);
         if (anchor) {
-                LASSERT(cl_page_is_vmlocked(env, pg));
                 LASSERT(pg->cp_sync_io == anchor);
                 pg->cp_sync_io = NULL;
        }
@@ -969,10 +938,10 @@ void cl_page_completion(const struct lu_env *env,
         */
        cl_page_put(env, pg);
 
-       if (anchor)
-                cl_sync_io_note(anchor, ioret);
+       if (anchor != NULL)
+               cl_sync_io_note(env, anchor, ioret);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_completion);
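The anchor updated in this hunk is the rendezvous point for synchronous I/O: the submitter primes it with the number of pages in flight, cl_page_completion() notes each page as it finishes, and the waiter is released once the count reaches zero (the commit adds the env argument to cl_sync_io_note() and makes the NULL check explicit). The sketch below is a hypothetical userspace analogue of that pattern, using pthreads rather than the kernel's wait queues; it is not the real cl_sync_io implementation.

/* Hypothetical userspace sketch of the anchor pattern: count pages in,
 * "note" each completion (remembering the first error), wake the waiter
 * when the count hits zero. */
#include <pthread.h>

struct sync_io_anchor {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	int             nr_pages;	/* pages still in flight */
	int             error;		/* first non-zero ioret  */
};

static void anchor_init(struct sync_io_anchor *a, int nr_pages)
{
	pthread_mutex_init(&a->lock, NULL);
	pthread_cond_init(&a->done, NULL);
	a->nr_pages = nr_pages;
	a->error = 0;
}

/* Counterpart of cl_sync_io_note(): called once per completed page. */
static void anchor_note(struct sync_io_anchor *a, int ioret)
{
	pthread_mutex_lock(&a->lock);
	if (ioret != 0 && a->error == 0)
		a->error = ioret;
	if (--a->nr_pages == 0)
		pthread_cond_broadcast(&a->done);
	pthread_mutex_unlock(&a->lock);
}

/* Counterpart of the submitter's wait: returns the first recorded error. */
static int anchor_wait(struct sync_io_anchor *a)
{
	pthread_mutex_lock(&a->lock);
	while (a->nr_pages > 0)
		pthread_cond_wait(&a->done, &a->lock);
	pthread_mutex_unlock(&a->lock);
	return a->error;
}

int main(void)
{
	struct sync_io_anchor a;

	anchor_init(&a, 2);		/* two pages submitted              */
	anchor_note(&a, 0);		/* first page completed OK          */
	anchor_note(&a, -5);		/* second page failed (e.g. -EIO)   */
	return anchor_wait(&a) == -5 ? 0 : 1;
}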
 
@@ -1033,30 +1002,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
 EXPORT_SYMBOL(cl_page_flush);
 
 /**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
-                          struct cl_page *page, pgoff_t *max_index)
-{
-       int rc;
-
-       PINVRNT(env, page, cl_page_invariant(page));
-
-       ENTRY;
-       rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
-                                   (const struct lu_env *,
-                                    const struct cl_page_slice *,
-                                    struct cl_io *, pgoff_t *),
-                                   io, max_index);
-       RETURN(rc);
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
  * Tells transfer engine that only part of a page is to be transmitted.
  *
  * \see cl_page_operations::cpo_clip()
@@ -1112,7 +1057,6 @@ int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
                               (const struct lu_env *,
                                const struct cl_page_slice *));
 }
-EXPORT_SYMBOL(cl_page_cancel);
 
 /**
  * Converts a byte offset within object \a obj into a page index.
@@ -1132,9 +1076,9 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 }
 EXPORT_SYMBOL(cl_index);
 
-int cl_page_size(const struct cl_object *obj)
+size_t cl_page_size(const struct cl_object *obj)
 {
-       return 1 << PAGE_CACHE_SHIFT;
+       return 1UL << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
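Both helpers in this hunk are thin wrappers around page-shift arithmetic: cl_index() maps a byte offset to the index of the page that covers it, and cl_page_size() is one client page expressed in bytes (the switch to size_t and 1UL keeps the result unsigned and wide enough on all architectures). A small stand-alone illustration, assuming the usual 4 KiB page (shift of 12):

/* Hypothetical illustration of the byte-offset/page-index arithmetic these
 * helpers implement; DEMO_PAGE_SHIFT stands in for PAGE_CACHE_SHIFT. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12UL

int main(void)
{
	unsigned long long offset = 5ULL << 20;		/* byte 5 MiB into the file */
	unsigned long long index  = offset >> DEMO_PAGE_SHIFT;
	size_t page_size          = (size_t)1 << DEMO_PAGE_SHIFT;

	/* prints: offset 5242880 -> page index 1280, page size 4096 */
	printf("offset %llu -> page index %llu, page size %zu\n",
	       offset, index, page_size);
	return 0;
}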
 
@@ -1161,11 +1105,51 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
 }
 EXPORT_SYMBOL(cl_page_slice_add);
 
-int  cl_page_init(void)
+/**
+ * Allocate and initialize cl_cache, called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
 {
-        return 0;
+       struct cl_client_cache  *cache = NULL;
+
+       ENTRY;
+       OBD_ALLOC(cache, sizeof(*cache));
+       if (cache == NULL)
+               RETURN(NULL);
+
+       /* Initialize cache data */
+       atomic_set(&cache->ccc_users, 1);
+       cache->ccc_lru_max = lru_page_max;
+       atomic_long_set(&cache->ccc_lru_left, lru_page_max);
+       spin_lock_init(&cache->ccc_lru_lock);
+       INIT_LIST_HEAD(&cache->ccc_lru);
+
+       /* turn unstable check off by default as it impacts performance */
+       cache->ccc_unstable_check = 0;
+       atomic_long_set(&cache->ccc_unstable_nr, 0);
+       init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+       RETURN(cache);
 }
+EXPORT_SYMBOL(cl_cache_init);
 
-void cl_page_fini(void)
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
+{
+       atomic_inc(&cache->ccc_users);
+}
+EXPORT_SYMBOL(cl_cache_incref);
+
+/**
+ * Decrease cl_cache refcount and free the cache if refcount=0.
+ * Since llite, lov and osc all hold cl_cache refcount,
+ * the free will not cause race. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
 {
+       if (atomic_dec_and_test(&cache->ccc_users))
+               OBD_FREE(cache, sizeof(*cache));
 }
+EXPORT_SYMBOL(cl_cache_decref);
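Taken together, the new cl_cache_* helpers implement a plain reference-counting lifecycle: cl_cache_init() hands the creator (llite) the first reference, lov and osc take their own with cl_cache_incref(), and the structure is freed only when the last cl_cache_decref() drops the count to zero, which is why the free cannot race (LU-6173). The following is a hypothetical, self-contained sketch of that discipline in plain C; it uses stdlib allocation and a non-atomic counter for a single-threaded demo, unlike the real code's OBD_ALLOC and atomics.

/* Hypothetical sketch of the cl_cache refcount discipline: the creator
 * starts at one reference, every other layer that stashes a pointer takes
 * its own, and the last put frees the structure. */
#include <stdlib.h>

struct demo_cache {
	int           users;		/* stands in for ccc_users   */
	unsigned long lru_max;		/* stands in for ccc_lru_max */
};

static struct demo_cache *demo_cache_init(unsigned long lru_max)
{
	struct demo_cache *cache = calloc(1, sizeof(*cache));

	if (cache != NULL) {
		cache->users = 1;		/* creator's reference */
		cache->lru_max = lru_max;
	}
	return cache;
}

static void demo_cache_incref(struct demo_cache *cache)
{
	cache->users++;				/* another layer shares it */
}

static void demo_cache_decref(struct demo_cache *cache)
{
	if (--cache->users == 0)
		free(cache);			/* last user releases it */
}

int main(void)
{
	struct demo_cache *cache = demo_cache_init(1024);

	if (cache == NULL)
		return 1;
	demo_cache_incref(cache);	/* e.g. the lov layer keeps a pointer */
	demo_cache_incref(cache);	/* e.g. the osc layer keeps a pointer */

	demo_cache_decref(cache);	/* layers drop their references ...   */
	demo_cache_decref(cache);
	demo_cache_decref(cache);	/* ... and the creator's put frees it */
	return 0;
}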