LU-5577 obdclass: change cl_fault_io->ft_nob to size_t
[fs/lustre-release.git] lustre/obdclass/cl_page.c
index 2d19693..446eb4c 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -123,7 +123,7 @@ cl_page_at_trusted(const struct cl_page *page,
        const struct cl_page_slice *slice;
        ENTRY;
 
-       cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+       list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        RETURN(slice);
        }
@@ -135,18 +135,18 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
        struct cl_object *obj  = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;
 
-       PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+       PASSERT(env, page, list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);
 
        ENTRY;
-       while (!cfs_list_empty(&page->cp_layers)) {
+       while (!list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;
 
-               slice = cfs_list_entry(page->cp_layers.next,
-                                      struct cl_page_slice, cpl_linkage);
-               cfs_list_del_init(page->cp_layers.next);
+               slice = list_entry(page->cp_layers.next,
+                                  struct cl_page_slice, cpl_linkage);
+               list_del_init(page->cp_layers.next);
                if (unlikely(slice->cpl_ops->cpo_fini != NULL))
                        slice->cpl_ops->cpo_fini(env, slice);
        }
@@ -179,7 +179,7 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
 
        ENTRY;
        OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
-                       __GFP_IO);
+                       GFP_NOFS);
        if (page != NULL) {
                int result = 0;
                atomic_set(&page->cp_ref, 1);
@@ -190,14 +190,13 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
                page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
-               CFS_INIT_LIST_HEAD(&page->cp_layers);
-               CFS_INIT_LIST_HEAD(&page->cp_batch);
-               CFS_INIT_LIST_HEAD(&page->cp_flight);
-               mutex_init(&page->cp_mutex);
+               INIT_LIST_HEAD(&page->cp_layers);
+               INIT_LIST_HEAD(&page->cp_batch);
+               INIT_LIST_HEAD(&page->cp_flight);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
-               cfs_list_for_each_entry(o, &head->loh_layers,
-                                       co_lu.lo_linkage) {
+               list_for_each_entry(o, &head->loh_layers,
+                                   co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o, page,
                                                                  ind);
@@ -279,11 +278,6 @@ EXPORT_SYMBOL(cl_page_find);
 
 static inline int cl_page_invariant(const struct cl_page *pg)
 {
-       /*
-        * Page invariant is protected by a VM lock.
-        */
-       LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
        return cl_page_in_use_noref(pg);
 }
 
@@ -389,7 +383,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
 
                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
-               PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+               PASSERT(env, page, list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
@@ -435,27 +429,51 @@ EXPORT_SYMBOL(cl_page_at);
 
 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
 
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
-({                                                                      \
-       const struct lu_env        *__env  = (_env);                    \
-       struct cl_page             *__page = (_page);                   \
-       const struct cl_page_slice *__scan;                             \
-       int                         __result;                           \
-       ptrdiff_t                   __op   = (_op);                     \
-       int                       (*__method)_proto;                    \
+#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                  \
+({                                                                     \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       int                         __result;                           \
+       ptrdiff_t                   __op   = (_op);                     \
+       int                        (*__method)_proto;                   \
                                                                        \
-       __result = 0;                                                   \
-       cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {     \
-               __method = *(void **)((char *)__scan->cpl_ops +  __op);        \
-               if (__method != NULL) {                                        \
+       __result = 0;                                                   \
+       list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
+               __method = *(void **)((char *)__scan->cpl_ops +  __op); \
+               if (__method != NULL) {                                 \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
-                       if (__result != 0)                              \
-                               break;                                  \
-               }                                                       \
-       }                                                               \
-       if (__result > 0)                                               \
-               __result = 0;                                           \
-       __result;                                                       \
+                       if (__result != 0)                              \
+                               break;                                  \
+               }                                                       \
+       }                                                               \
+       if (__result > 0)                                               \
+               __result = 0;                                           \
+       __result;                                                       \
+})
+
+#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)          \
+({                                                                     \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       int                         __result;                           \
+       ptrdiff_t                   __op   = (_op);                     \
+       int                       (*__method)_proto;                    \
+                                                                       \
+       __result = 0;                                                   \
+       list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
+                                   cpl_linkage) {                      \
+               __method = *(void **)((char *)__scan->cpl_ops +  __op); \
+               if (__method != NULL) {                                 \
+                       __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+                       if (__result != 0)                              \
+                               break;                                  \
+               }                                                       \
+       }                                                               \
+       if (__result > 0)                                               \
+               __result = 0;                                           \
+       __result;                                                       \
 })
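
Both CL_PAGE_INVOKE and the new CL_PAGE_INVOKE_REVERSE rely on the same dispatch idiom: the caller names a cl_page_operations method by its byte offset (via CL_PAGE_OP), each layer's ops table is probed at that offset, and the walk stops at the first non-zero return, with positive results folded to 0. A standalone userspace sketch of that idiom, using invented model_* names rather than the Lustre types, would be roughly:

#include <stddef.h>
#include <stdio.h>

struct model_slice;

struct model_ops {
	int (*op_is_under_lock)(const struct model_slice *slice);
};

struct model_slice {
	const struct model_ops	*ops;
	const char		*name;
};

typedef int (*model_method_t)(const struct model_slice *);

/* Walk the layers from the last one registered back to the first,
 * calling the method found at byte offset 'op' in each ops table. */
static int invoke_reverse(const struct model_slice *slices, int nr,
			  ptrdiff_t op)
{
	int result = 0;
	int i;

	for (i = nr - 1; i >= 0; i--) {
		model_method_t method = *(const model_method_t *)
			((const char *)slices[i].ops + op);

		if (method != NULL) {
			result = method(&slices[i]);
			if (result != 0)
				break;
		}
	}
	/* like the macros: a positive return means "handled", folded to 0 */
	return result > 0 ? 0 : result;
}

static int bottom_is_under_lock(const struct model_slice *slice)
{
	printf("%s layer answers first in the reverse walk\n", slice->name);
	return 1;
}

static const struct model_ops top_ops    = { .op_is_under_lock = NULL };
static const struct model_ops bottom_ops = { .op_is_under_lock = bottom_is_under_lock };

int main(void)
{
	struct model_slice layers[] = {
		{ .ops = &top_ops,    .name = "top"    },
		{ .ops = &bottom_ops, .name = "bottom" },
	};

	return invoke_reverse(layers, 2,
			      offsetof(struct model_ops, op_is_under_lock));
}
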
 
 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                  \
@@ -466,8 +484,7 @@ do {                                                                        \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
-       cfs_list_for_each_entry(__scan, &__page->cp_layers,             \
-                               cpl_linkage) {                          \
+       list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
                __method = *(void **)((char *)__scan->cpl_ops +  __op); \
                if (__method != NULL)                                   \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
@@ -483,8 +500,8 @@ do {                                                                        \
        void                      (*__method)_proto;                    \
                                                                        \
        /* get to the bottom page. */                                   \
-       cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers,     \
-                                       cpl_linkage) {                  \
+       list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
+                                   cpl_linkage) {                      \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method != NULL)                                   \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
@@ -522,7 +539,6 @@ static void cl_page_owner_clear(struct cl_page *page)
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
-               page->cp_task = NULL;
        }
        EXIT;
 }
@@ -614,7 +630,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                         PASSERT(env, pg, pg->cp_owner == NULL);
                         PASSERT(env, pg, pg->cp_req == NULL);
                        pg->cp_owner = cl_io_top(io);
-                        pg->cp_task  = current;
                         cl_page_owner_set(pg);
                         if (pg->cp_state != CPS_FREEING) {
                                 cl_page_state_set(env, pg, CPS_OWNED);
@@ -673,7 +688,6 @@ void cl_page_assume(const struct lu_env *env,
        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, pg->cp_owner == NULL);
        pg->cp_owner = cl_io_top(io);
-       pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
        EXIT;
@@ -932,11 +946,6 @@ void cl_page_completion(const struct lu_env *env,
 
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
-        if (crt == CRT_READ && ioret == 0) {
-                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
-                pg->cp_flags |= CPF_READ_COMPLETED;
-        }
-
         cl_page_state_set(env, pg, CPS_CACHED);
        if (crt >= CRT_NR)
                return;
@@ -944,7 +953,6 @@ void cl_page_completion(const struct lu_env *env,
                                (const struct lu_env *,
                                 const struct cl_page_slice *, int), ioret);
         if (anchor) {
-                LASSERT(cl_page_is_vmlocked(env, pg));
                 LASSERT(pg->cp_sync_io == anchor);
                 pg->cp_sync_io = NULL;
        }
@@ -955,10 +963,10 @@ void cl_page_completion(const struct lu_env *env,
         */
        cl_page_put(env, pg);
 
-       if (anchor)
-                cl_sync_io_note(anchor, ioret);
+       if (anchor != NULL)
+               cl_sync_io_note(env, anchor, ioret);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_completion);
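
Note that the completion path above drops its page reference before signalling the anchor, so the anchor must carry everything the waiting thread still needs, typically an in-flight counter plus the first error seen. A rough userspace model of that bookkeeping (struct sync_anchor and its helpers are invented names, not the cl_sync_io API) could look like this:

#include <stdatomic.h>
#include <stdio.h>

struct sync_anchor {
	atomic_int	nr_pending;	/* pages still in flight */
	atomic_int	error;		/* first non-zero completion result */
};

static void anchor_init(struct sync_anchor *anchor, int nr_pages)
{
	atomic_init(&anchor->nr_pending, nr_pages);
	atomic_init(&anchor->error, 0);
}

/* Called once per page when its transfer finishes, in the spirit of
 * cl_sync_io_note(); by this point the page reference is already gone. */
static void anchor_note(struct sync_anchor *anchor, int ioret)
{
	int expected = 0;

	if (ioret != 0)		/* remember only the first error */
		atomic_compare_exchange_strong(&anchor->error, &expected,
					       ioret);

	if (atomic_fetch_sub(&anchor->nr_pending, 1) == 1)
		printf("last page done, rc = %d\n",
		       atomic_load(&anchor->error));
}

int main(void)
{
	struct sync_anchor anchor;

	anchor_init(&anchor, 3);
	anchor_note(&anchor, 0);
	anchor_note(&anchor, -5);	/* one page failed, e.g. -EIO */
	anchor_note(&anchor, 0);
	return 0;
}
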
 
@@ -1026,19 +1034,19 @@ EXPORT_SYMBOL(cl_page_flush);
  * \see cl_page_operations::cpo_is_under_lock()
  */
 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
-                          struct cl_page *page)
+                          struct cl_page *page, pgoff_t *max_index)
 {
-        int rc;
+       int rc;
 
-        PINVRNT(env, page, cl_page_invariant(page));
+       PINVRNT(env, page, cl_page_invariant(page));
 
-        ENTRY;
-        rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
-                            (const struct lu_env *,
-                             const struct cl_page_slice *, struct cl_io *),
-                            io);
-        PASSERT(env, page, rc != 0);
-        RETURN(rc);
+       ENTRY;
+       rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
+                                   (const struct lu_env *,
+                                    const struct cl_page_slice *,
+                                    struct cl_io *, pgoff_t *),
+                                   io, max_index);
+       RETURN(rc);
 }
 EXPORT_SYMBOL(cl_page_is_under_lock);
 
@@ -1067,10 +1075,10 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
                           lu_printer_t printer, const struct cl_page *pg)
 {
        (*printer)(env, cookie,
-                  "page@%p[%d %p %d %d %d %p %p %#x]\n",
+                  "page@%p[%d %p %d %d %d %p %p]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_error, pg->cp_type,
-                  pg->cp_owner, pg->cp_req, pg->cp_flags);
+                  pg->cp_owner, pg->cp_req);
 }
 EXPORT_SYMBOL(cl_page_header_print);
 
@@ -1118,9 +1126,9 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 }
 EXPORT_SYMBOL(cl_index);
 
-int cl_page_size(const struct cl_object *obj)
+size_t cl_page_size(const struct cl_object *obj)
 {
-       return 1 << PAGE_CACHE_SHIFT;
+       return 1UL << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
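
cl_page_size() now returns size_t and shifts an unsigned long constant, in line with the commit's wider move of byte counts such as cl_fault_io->ft_nob from int to size_t. The short standalone demo below (MODEL_PAGE_SHIFT is a made-up stand-in for PAGE_CACHE_SHIFT) shows the kind of truncation that signed 32-bit byte counts invite once a transfer length can exceed 2GB:

#include <stdio.h>
#include <stddef.h>

#define MODEL_PAGE_SHIFT 12		/* stand-in for PAGE_CACHE_SHIFT */

static size_t model_page_size(void)
{
	/* same shape as the new cl_page_size(): unsigned, size_t-wide */
	return 1UL << MODEL_PAGE_SHIFT;
}

int main(void)
{
	size_t nob = 3UL * 1024 * 1024 * 1024;	/* a 3GB transfer length */
	int nob_int = (int)nob;	/* what an int field would hold; the
				 * converted value is implementation-defined
				 * and typically negative on LP64 */

	printf("pages needed (size_t math): %zu\n", nob / model_page_size());
	printf("same length seen through an int field: %d\n", nob_int);
	return 0;
}
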
 
@@ -1134,23 +1142,15 @@ EXPORT_SYMBOL(cl_page_size);
  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
  */
 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
-                       struct cl_object *obj,
-                       const struct cl_page_operations *ops)
+                      struct cl_object *obj, pgoff_t index,
+                      const struct cl_page_operations *ops)
 {
-        ENTRY;
-        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
-        slice->cpl_obj  = obj;
-        slice->cpl_ops  = ops;
-        slice->cpl_page = page;
-        EXIT;
+       ENTRY;
+       list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+       slice->cpl_obj  = obj;
+       slice->cpl_index = index;
+       slice->cpl_ops  = ops;
+       slice->cpl_page = page;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_slice_add);
-
-int  cl_page_init(void)
-{
-        return 0;
-}
-
-void cl_page_fini(void)
-{
-}
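
cl_page_slice_add() now records the page index on the slice alongside the existing back pointers. Each layer typically embeds such a slice inside its own per-page structure and recovers the enclosing object with container_of-style pointer arithmetic; the standalone sketch below models that layout with invented demo_page/demo_slice types rather than the actual Lustre per-layer structures:

#include <stddef.h>
#include <stdio.h>

struct demo_slice {
	unsigned long	 cpl_index;	/* page index, as stored by the new cl_page_slice_add() */
	const char	*cpl_owner;	/* stands in for cpl_obj/cpl_ops/cpl_page */
};

struct demo_page {
	int			dp_private;	/* layer-private state */
	struct demo_slice	dp_slice;	/* embedded generic slice */
};

/* Generic code only ever sees the slice, much like the cpo_* methods do. */
static void print_layer(const struct demo_slice *slice)
{
	const struct demo_page *dp = (const struct demo_page *)
		((const char *)slice - offsetof(struct demo_page, dp_slice));

	printf("layer %s, index %lu, private %d\n",
	       slice->cpl_owner, slice->cpl_index, dp->dp_private);
}

int main(void)
{
	struct demo_page dp = {
		.dp_private = 42,
		.dp_slice = { .cpl_index = 7, .cpl_owner = "demo" },
	};

	print_layer(&dp.dp_slice);
	return 0;
}
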