Whamcloud - gitweb
LU-744 clio: save memory allocations for cl_page
author    Jinshan Xiong <jinshan.xiong@intel.com>
          Thu, 20 Dec 2012 02:07:45 +0000 (18:07 -0800)
committer Oleg Drokin <green@whamcloud.com>
          Sat, 26 Jan 2013 02:45:39 +0000 (21:45 -0500)
It used to take six memory allocations to create a page:
 - the top cl_page plus its llite and lov page slices;
 - the sub cl_page plus its lovsub and osc page slices.

This patch allocates the top cl_page together with its llite and lov
page slices in a single piece of memory, and likewise the sub cl_page
with its lovsub and osc slices, so that only two memory allocations
are needed for each page.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Change-Id: I1c4dd0541bda23e7d739131f73bcd895b8dd2c13
Reviewed-on: http://review.whamcloud.com/4943
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Fan Yong <fan.yong@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
21 files changed:
libcfs/include/libcfs/posix/libcfs.h
lustre/include/cl_object.h
lustre/lclient/lcommon_cl.c
lustre/liblustre/llite_cl.c
lustre/llite/lloop.c
lustre/llite/vvp_dev.c
lustre/llite/vvp_internal.h
lustre/llite/vvp_page.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_dev.c
lustre/lov/lov_object.c
lustre/lov/lov_page.c
lustre/lov/lovsub_object.c
lustre/lov/lovsub_page.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdecho/echo_client.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_dev.c
lustre/osc/osc_object.c
lustre/osc/osc_page.c

index 5d7dfd2..95ec2ab 100644 (file)
@@ -198,6 +198,10 @@ typedef struct dirent64 cfs_dirent_t;
 #define __swab64s(x)                            do { *(x) = __swab64(*(x)); } while (0)
 #endif
 
+#if !defined(ALIGN)
+#define __ALIGN_MASK(x, mask)  (((x) + (mask)) & ~(mask))
+#define ALIGN(x, a)            __ALIGN_MASK(x, (typeof(x))(a) - 1)
+#endif
 
 # ifndef THREAD_SIZE /* x86_64 linux has THREAD_SIZE in userspace */
 #  define CFS_THREAD_SIZE 8192
index e78d05a..bd254e8 100644 (file)
@@ -251,6 +251,8 @@ struct cl_object {
         struct lu_object                   co_lu;
         /** per-object-layer operations */
         const struct cl_object_operations *co_ops;
+       /** offset of page slice in cl_page buffer */
+       int                                co_slice_off;
 };
 
 /**
@@ -319,10 +321,8 @@ struct cl_object_operations {
          * \retval valid-pointer pointer to already existing referenced page
          *         to be used instead of newly created.
          */
-        struct cl_page *(*coo_page_init)(const struct lu_env *env,
-                                         struct cl_object *obj,
-                                         struct cl_page *page,
-                                         cfs_page_t *vmpage);
+       int  (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+                               struct cl_page *page, cfs_page_t *vmpage);
         /**
          * Initialize lock slice for this layer. Called top-to-bottom through
          * every object layer when a new cl_lock is instantiated. Layer
@@ -431,10 +431,14 @@ struct cl_object_header {
          */
        spinlock_t               coh_attr_guard;
        /**
+        * Size of cl_page + page slices
+        */
+       unsigned short           coh_page_bufsize;
+       /**
         * Number of objects above this one: 0 for a top-object, 1 for its
         * sub-object, etc.
         */
-       unsigned                 coh_nesting;
+       unsigned char            coh_nesting;
 };
 
 /**
@@ -2756,6 +2760,18 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
         return cl_object_header(o0) == cl_object_header(o1);
 }
 
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+       clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+       cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+}
+
+static inline void *cl_object_page_slice(struct cl_object *clob,
+                                        struct cl_page *page)
+{
+       return (void *)((char *)page + clob->co_slice_off);
+}
+
 /** @} cl_object */
 
 /** \defgroup cl_page cl_page
index 0ad8f1e..c9ba748 100644 (file)
@@ -368,6 +368,7 @@ int ccc_object_init0(const struct lu_env *env,
 {
         vob->cob_inode = conf->coc_inode;
         vob->cob_transient_pages = 0;
+       cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
         return 0;
 }
 
index f906613..7374e4b 100644 (file)
@@ -57,9 +57,8 @@
 static int   slp_type_init     (struct lu_device_type *t);
 static void  slp_type_fini     (struct lu_device_type *t);
 
-static struct cl_page * slp_page_init(const struct lu_env *env,
-                                     struct cl_object *obj,
-                                     struct cl_page *page, cfs_page_t *vmpage);
+static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
+                        struct cl_page *page, cfs_page_t *vmpage);
 static int   slp_attr_get     (const struct lu_env *env, struct cl_object *obj,
                                struct cl_attr *attr);
 
@@ -224,32 +223,26 @@ void slp_global_fini(void)
  *
  */
 
-static struct cl_page *slp_page_init(const struct lu_env *env,
-                                     struct cl_object *obj,
-                                     struct cl_page *page, cfs_page_t *vmpage)
+static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
+                       struct cl_page *page, cfs_page_t *vmpage)
 {
-        struct ccc_page *cpg;
-        int result;
+        struct ccc_page *cpg = cl_object_page_slice(obj, page);
 
         CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
-        OBD_ALLOC_PTR(cpg);
-        if (cpg != NULL) {
-                cpg->cpg_page = vmpage;
+       cpg->cpg_page = vmpage;
 
-                if (page->cp_type == CPT_CACHEABLE) {
-                        LBUG();
-                } else {
-                        struct ccc_object *clobj = cl2ccc(obj);
+       if (page->cp_type == CPT_CACHEABLE) {
+               LBUG();
+       } else {
+               struct ccc_object *clobj = cl2ccc(obj);
 
-                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
-                                          &slp_transient_page_ops);
-                        clobj->cob_transient_pages++;
-                }
-                result = 0;
-        } else
-                result = -ENOMEM;
-        return ERR_PTR(result);
+               cl_page_slice_add(page, &cpg->cpg_cl, obj,
+                               &slp_transient_page_ops);
+               clobj->cob_transient_pages++;
+       }
+
+        return 0;
 }
 
 static int slp_io_init(const struct lu_env *env, struct cl_object *obj,
index 2c17447..5f00ab2 100644 (file)
@@ -248,7 +248,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
         * Unfortunately this is NOT easy to fix.
         * Thoughts on solution:
         * 0. Define a reserved pool for cl_pages, which could be a list of
-        *    pre-allocated cl_pages from cl_page_kmem;
+        *    pre-allocated cl_pages;
         * 1. Define a new operation in cl_object_operations{}, says clo_depth,
         *    which measures how many layers for this lustre object. Generally
         *    speaking, the depth would be 2, one for llite, and one for lovsub.
index dca7a39..bca4997 100644 (file)
  * "llite_" (var. "ll_") prefix.
  */
 
-cfs_mem_cache_t *vvp_page_kmem;
 cfs_mem_cache_t *vvp_thread_kmem;
 static cfs_mem_cache_t *vvp_session_kmem;
 static struct lu_kmem_descr vvp_caches[] = {
         {
-                .ckd_cache = &vvp_page_kmem,
-                .ckd_name  = "vvp_page_kmem",
-                .ckd_size  = sizeof (struct ccc_page)
-        },
-        {
                 .ckd_cache = &vvp_thread_kmem,
                 .ckd_name  = "vvp_thread_kmem",
                 .ckd_size  = sizeof (struct vvp_thread_info),
index 0fbba8a..33e34a6 100644 (file)
@@ -51,7 +51,7 @@ int               vvp_io_init     (const struct lu_env *env,
 int               vvp_lock_init   (const struct lu_env *env,
                                    struct cl_object *obj, struct cl_lock *lock,
                                    const struct cl_io *io);
-struct cl_page   *vvp_page_init   (const struct lu_env *env,
+int              vvp_page_init   (const struct lu_env *env,
                                    struct cl_object *obj,
                                    struct cl_page *page, cfs_page_t *vmpage);
 struct lu_object *vvp_object_alloc(const struct lu_env *env,
@@ -60,7 +60,6 @@ struct lu_object *vvp_object_alloc(const struct lu_env *env,
 
 struct ccc_object *cl_inode2ccc(struct inode *inode);
 
-extern cfs_mem_cache_t *vvp_page_kmem;
 extern cfs_mem_cache_t *vvp_thread_kmem;
 
 #endif /* VVP_INTERNAL_H */
index 6d772e8..51e8384 100644 (file)
@@ -62,7 +62,6 @@ static void vvp_page_fini_common(struct ccc_page *cp)
 
         LASSERT(vmpage != NULL);
         page_cache_release(vmpage);
-        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
 }
 
 static void vvp_page_fini(const struct lu_env *env,
@@ -534,36 +533,30 @@ static const struct cl_page_operations vvp_transient_page_ops = {
         }
 };
 
-struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
-                             struct cl_page *page, cfs_page_t *vmpage)
+int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
+               struct cl_page *page, cfs_page_t *vmpage)
 {
-       struct ccc_page *cpg;
-       int result;
+       struct ccc_page *cpg = cl_object_page_slice(obj, page);
 
        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
-       OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
-       if (cpg != NULL) {
-               cpg->cpg_page = vmpage;
-               page_cache_get(vmpage);
-
-               CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
-               if (page->cp_type == CPT_CACHEABLE) {
-                       SetPagePrivate(vmpage);
-                       vmpage->private = (unsigned long)page;
-                       cl_page_slice_add(page, &cpg->cpg_cl, obj,
-                                         &vvp_page_ops);
-               } else {
-                       struct ccc_object *clobj = cl2ccc(obj);
-
-                       LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
-                       cl_page_slice_add(page, &cpg->cpg_cl, obj,
-                                         &vvp_transient_page_ops);
-                       clobj->cob_transient_pages++;
-               }
-               result = 0;
-       } else
-               result = -ENOMEM;
-       return ERR_PTR(result);
+       cpg->cpg_page = vmpage;
+       page_cache_get(vmpage);
+
+       CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+       if (page->cp_type == CPT_CACHEABLE) {
+               SetPagePrivate(vmpage);
+               vmpage->private = (unsigned long)page;
+               cl_page_slice_add(page, &cpg->cpg_cl, obj,
+                               &vvp_page_ops);
+       } else {
+               struct ccc_object *clobj = cl2ccc(obj);
+
+               LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+               cl_page_slice_add(page, &cpg->cpg_cl, obj,
+                               &vvp_transient_page_ops);
+               clobj->cob_transient_pages++;
+       }
+       return 0;
 }
 
index efc8d04..6d4360c 100644 (file)
@@ -554,14 +554,12 @@ extern struct lu_device_type lovsub_device_type;
 extern struct lu_context_key lov_key;
 extern struct lu_context_key lov_session_key;
 
-extern cfs_mem_cache_t *lov_page_kmem;
 extern cfs_mem_cache_t *lov_lock_kmem;
 extern cfs_mem_cache_t *lov_object_kmem;
 extern cfs_mem_cache_t *lov_thread_kmem;
 extern cfs_mem_cache_t *lov_session_kmem;
 extern cfs_mem_cache_t *lov_req_kmem;
 
-extern cfs_mem_cache_t *lovsub_page_kmem;
 extern cfs_mem_cache_t *lovsub_lock_kmem;
 extern cfs_mem_cache_t *lovsub_object_kmem;
 extern cfs_mem_cache_t *lovsub_req_kmem;
@@ -598,17 +596,17 @@ int   lov_sublock_modify  (const struct lu_env *env, struct lov_lock *lov,
                            const struct cl_lock_descr *d, int idx);
 
 
-struct cl_page *lov_page_init   (const struct lu_env *env, struct cl_object *ob,
-                                 struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page *lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
-                                 struct cl_page *page, cfs_page_t *vmpage);
+int   lov_page_init       (const struct lu_env *env, struct cl_object *ob,
+                           struct cl_page *page, cfs_page_t *vmpage);
+int   lovsub_page_init    (const struct lu_env *env, struct cl_object *ob,
+                           struct cl_page *page, cfs_page_t *vmpage);
 
-struct cl_page   *lov_page_init_empty(const struct lu_env *env,
-                                      struct cl_object *obj,
-                                      struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page   *lov_page_init_raid0(const struct lu_env *env,
-                                      struct cl_object *obj,
-                                      struct cl_page *page, cfs_page_t *vmpage);
+int   lov_page_init_empty (const struct lu_env *env,
+                           struct cl_object *obj,
+                           struct cl_page *page, cfs_page_t *vmpage);
+int   lov_page_init_raid0 (const struct lu_env *env,
+                           struct cl_object *obj,
+                           struct cl_page *page, cfs_page_t *vmpage);
 struct lu_object *lov_object_alloc   (const struct lu_env *env,
                                       const struct lu_object_header *hdr,
                                       struct lu_device *dev);
index 2bc372d..4dce6a6 100644 (file)
 
 #include "lov_cl_internal.h"
 
-cfs_mem_cache_t *lov_page_kmem;
 cfs_mem_cache_t *lov_lock_kmem;
 cfs_mem_cache_t *lov_object_kmem;
 cfs_mem_cache_t *lov_thread_kmem;
 cfs_mem_cache_t *lov_session_kmem;
 cfs_mem_cache_t *lov_req_kmem;
 
-cfs_mem_cache_t *lovsub_page_kmem;
 cfs_mem_cache_t *lovsub_lock_kmem;
 cfs_mem_cache_t *lovsub_object_kmem;
 cfs_mem_cache_t *lovsub_req_kmem;
@@ -64,11 +62,6 @@ struct lock_class_key cl_lov_device_mutex_class;
 
 struct lu_kmem_descr lov_caches[] = {
         {
-                .ckd_cache = &lov_page_kmem,
-                .ckd_name  = "lov_page_kmem",
-                .ckd_size  = sizeof (struct lov_page)
-        },
-        {
                 .ckd_cache = &lov_lock_kmem,
                 .ckd_name  = "lov_lock_kmem",
                 .ckd_size  = sizeof (struct lov_lock)
@@ -94,11 +87,6 @@ struct lu_kmem_descr lov_caches[] = {
                 .ckd_size  = sizeof (struct lov_req)
         },
         {
-                .ckd_cache = &lovsub_page_kmem,
-                .ckd_name  = "lovsub_page_kmem",
-                .ckd_size  = sizeof (struct lovsub_page)
-        },
-        {
                 .ckd_cache = &lovsub_lock_kmem,
                 .ckd_name  = "lovsub_lock_kmem",
                 .ckd_size  = sizeof (struct lovsub_lock)
index 2908bc0..4b1d3af 100644 (file)
@@ -67,10 +67,8 @@ struct lov_layout_operations {
                             union lov_layout_state *state);
         int  (*llo_print)(const struct lu_env *env, void *cookie,
                           lu_printer_t p, const struct lu_object *o);
-        struct cl_page *(*llo_page_init)(const struct lu_env *env,
-                                         struct cl_object *obj,
-                                         struct cl_page *page,
-                                         cfs_page_t *vmpage);
+        int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
+                               struct cl_page *page, cfs_page_t *vmpage);
         int  (*llo_lock_init)(const struct lu_env *env,
                               struct cl_object *obj, struct cl_lock *lock,
                               const struct cl_io *io);
@@ -635,6 +633,8 @@ int lov_object_init(const struct lu_env *env, struct lu_object *obj,
        init_rwsem(&lov->lo_type_guard);
        cfs_waitq_init(&lov->lo_waitq);
 
+       cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
+
         /* no locking is necessary, as object is being created */
         lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
         ops = &lov_dispatch[lov->lo_type];
@@ -733,8 +733,8 @@ static int lov_object_print(const struct lu_env *env, void *cookie,
         return LOV_2DISPATCH(lu2lov(o), llo_print, env, cookie, p, o);
 }
 
-struct cl_page *lov_page_init(const struct lu_env *env, struct cl_object *obj,
-                              struct cl_page *page, cfs_page_t *vmpage)
+int lov_page_init(const struct lu_env *env, struct cl_object *obj,
+               struct cl_page *page, cfs_page_t *vmpage)
 {
         return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
                                    llo_page_init, env, obj, page, vmpage);
index f7cf9f5..74efbeb 100644 (file)
@@ -66,7 +66,6 @@ static int lov_page_invariant(const struct cl_page_slice *slice)
 static void lov_page_fini(const struct lu_env *env,
                           struct cl_page_slice *slice)
 {
-        struct lov_page *lp  = cl2lov_page(slice);
         struct cl_page  *sub = lov_sub_page(slice);
 
         LINVRNT(lov_page_invariant(slice));
@@ -79,7 +78,6 @@ static void lov_page_fini(const struct lu_env *env,
                 slice->cpl_page->cp_child = NULL;
                 cl_page_put(env, sub);
         }
-        OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
         EXIT;
 }
 
@@ -157,17 +155,11 @@ static const struct cl_page_operations lov_page_ops = {
 static void lov_empty_page_fini(const struct lu_env *env,
                                 struct cl_page_slice *slice)
 {
-        struct lov_page *lp  = cl2lov_page(slice);
-
         LASSERT(slice->cpl_page->cp_child == NULL);
-        ENTRY;
-        OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
-        EXIT;
 }
 
-struct cl_page *lov_page_init_raid0(const struct lu_env *env,
-                                    struct cl_object *obj, struct cl_page *page,
-                                    cfs_page_t *vmpage)
+int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
+                       struct cl_page *page, cfs_page_t *vmpage)
 {
         struct lov_object *loo = cl2lov(obj);
         struct lov_layout_raid0 *r0 = lov_r0(loo);
@@ -175,8 +167,7 @@ struct cl_page *lov_page_init_raid0(const struct lu_env *env,
         struct cl_page    *subpage;
         struct cl_object  *subobj;
         struct lov_io_sub *sub;
-        struct lov_page   *lpg;
-        struct cl_page    *result;
+        struct lov_page   *lpg = cl_object_page_slice(obj, page);
         loff_t             offset;
         obd_off            suboff;
         int                stripe;
@@ -190,28 +181,24 @@ struct cl_page *lov_page_init_raid0(const struct lu_env *env,
                                    &suboff);
         LASSERT(rc == 0);
 
-        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
-        if (lpg == NULL)
-                GOTO(out, result = ERR_PTR(-ENOMEM));
-
         lpg->lps_invalid = 1;
         cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
 
         sub = lov_sub_get(env, lio, stripe);
         if (IS_ERR(sub))
-                GOTO(out, result = (struct cl_page *)sub);
+                GOTO(out, rc = PTR_ERR(sub));
 
         subobj = lovsub2cl(r0->lo_sub[stripe]);
         subpage = cl_page_find_sub(sub->sub_env, subobj,
                                    cl_index(subobj, suboff), vmpage, page);
         lov_sub_put(sub);
         if (IS_ERR(subpage))
-                GOTO(out, result = subpage);
+                GOTO(out, rc = PTR_ERR(subpage));
 
         if (likely(subpage->cp_parent == page)) {
                 lu_ref_add(&subpage->cp_reference, "lov", page);
                 lpg->lps_invalid = 0;
-                result = NULL;
+               rc = 0;
         } else {
                 CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
                 CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
@@ -220,7 +207,7 @@ struct cl_page *lov_page_init_raid0(const struct lu_env *env,
 
         EXIT;
 out:
-        return(result);
+        return rc;
 }
 
 
@@ -229,26 +216,19 @@ static const struct cl_page_operations lov_empty_page_ops = {
         .cpo_print  = lov_page_print
 };
 
-struct cl_page *lov_page_init_empty(const struct lu_env *env,
-                                    struct cl_object *obj, struct cl_page *page,
-                                    cfs_page_t *vmpage)
+int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
+                       struct cl_page *page, cfs_page_t *vmpage)
 {
-        struct lov_page   *lpg;
-        int result = -ENOMEM;
+        struct lov_page *lpg = cl_object_page_slice(obj, page);
+       void *addr;
         ENTRY;
 
-        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
-        if (lpg != NULL) {
-                void *addr;
-                cl_page_slice_add(page, &lpg->lps_cl,
-                                  obj, &lov_empty_page_ops);
-                addr = cfs_kmap(vmpage);
-                memset(addr, 0, cl_page_size(obj));
-                cfs_kunmap(vmpage);
-                cl_page_export(env, page, 1);
-                result = 0;
-        }
-        RETURN(ERR_PTR(result));
+       cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
+       addr = cfs_kmap(vmpage);
+       memset(addr, 0, cl_page_size(obj));
+       cfs_kunmap(vmpage);
+       cl_page_export(env, page, 1);
+        RETURN(0);
 }
 
 
index 3527bd1..536039d 100644 (file)
@@ -66,6 +66,7 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
         below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
         if (below != NULL) {
                 lu_object_add(obj, below);
+               cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
                 result = 0;
         } else
                 result = -ENOMEM;
index 4940af9..f1e5b46 100644 (file)
 static void lovsub_page_fini(const struct lu_env *env,
                              struct cl_page_slice *slice)
 {
-        struct lovsub_page *lsb = cl2lovsub_page(slice);
-        ENTRY;
-        OBD_SLAB_FREE_PTR(lsb, lovsub_page_kmem);
-        EXIT;
 }
 
 static const struct cl_page_operations lovsub_page_ops = {
         .cpo_fini   = lovsub_page_fini
 };
 
-struct cl_page *lovsub_page_init(const struct lu_env *env,
-                                 struct cl_object *obj,
-                                 struct cl_page *page, cfs_page_t *unused)
+int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
+                       struct cl_page *page, cfs_page_t *unused)
 {
-        struct lovsub_page *lsb;
-        int result;
-
+        struct lovsub_page *lsb = cl_object_page_slice(obj, page);
         ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(lsb, lovsub_page_kmem, CFS_ALLOC_IO);
-        if (lsb != NULL) {
-                cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
-                result = 0;
-        } else
-                result = -ENOMEM;
-        RETURN(ERR_PTR(result));
+
+       cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
+        RETURN(0);
 }
 
 /** @} lov */
index 8f47a14..a7692e1 100644 (file)
@@ -92,6 +92,7 @@ int cl_object_header_init(struct cl_object_header *h)
                 /* XXX hard coded GFP_* mask. */
                 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
                 CFS_INIT_LIST_HEAD(&h->coh_locks);
+               h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
         }
         RETURN(result);
 }
index 96d3551..b34555a 100644 (file)
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                             int radix);
 
-static cfs_mem_cache_t      *cl_page_kmem = NULL;
-
-static struct lu_kmem_descr cl_page_caches[] = {
-        {
-                .ckd_cache = &cl_page_kmem,
-                .ckd_name  = "cl_page_kmem",
-                .ckd_size  = sizeof (struct cl_page)
-        },
-        {
-                .ckd_cache = NULL
-        }
-};
-
 #ifdef LIBCFS_DEBUG
 # define PASSERT(env, page, expr)                                       \
   do {                                                                    \
@@ -289,6 +276,7 @@ EXPORT_SYMBOL(cl_page_gang_lookup);
 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 {
         struct cl_object *obj  = page->cp_obj;
+       int pagesize = cl_object_header(obj)->coh_page_bufsize;
 
         PASSERT(env, page, cfs_list_empty(&page->cp_batch));
         PASSERT(env, page, page->cp_owner == NULL);
@@ -311,7 +299,7 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
         lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
         cl_object_put(env, obj);
         lu_ref_fini(&page->cp_reference);
-        OBD_SLAB_FREE_PTR(page, cl_page_kmem);
+        OBD_FREE(page, pagesize);
         EXIT;
 }
 
@@ -326,58 +314,55 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
         *(enum cl_page_state *)&page->cp_state = state;
 }
 
-static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
-                         pgoff_t ind, struct page *vmpage,
-                         enum cl_page_type type, struct cl_page **out)
+static struct cl_page *cl_page_alloc(const struct lu_env *env,
+               struct cl_object *o, pgoff_t ind, struct page *vmpage,
+               enum cl_page_type type)
 {
-        struct cl_page          *page;
-        struct cl_page          *err  = NULL;
-        struct lu_object_header *head;
-        int                      result;
+       struct cl_page          *page;
+       struct lu_object_header *head;
 
-        ENTRY;
-        result = +1;
-        OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
-        if (page != NULL) {
-                cfs_atomic_set(&page->cp_ref, 1);
+       ENTRY;
+       OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
+                       CFS_ALLOC_IO);
+       if (page != NULL) {
+               int result;
+               cfs_atomic_set(&page->cp_ref, 1);
                if (type == CPT_CACHEABLE) /* for radix tree */
                        cfs_atomic_inc(&page->cp_ref);
-                page->cp_obj = o;
-                cl_object_get(o);
-                page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
-                                                     "cl_page", page);
-                page->cp_index = ind;
-                cl_page_state_set_trust(page, CPS_CACHED);
+               page->cp_obj = o;
+               cl_object_get(o);
+               page->cp_obj_ref = lu_object_ref_add(&o->co_lu, "cl_page",page);
+               page->cp_index = ind;
+               cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
                CFS_INIT_LIST_HEAD(&page->cp_batch);
                CFS_INIT_LIST_HEAD(&page->cp_flight);
                mutex_init(&page->cp_mutex);
-                lu_ref_init(&page->cp_reference);
-                head = o->co_lu.lo_header;
-                cfs_list_for_each_entry(o, &head->loh_layers,
-                                        co_lu.lo_linkage) {
-                        if (o->co_ops->coo_page_init != NULL) {
-                                err = o->co_ops->coo_page_init(env, o,
-                                                               page, vmpage);
-                                if (err != NULL) {
-                                        cl_page_delete0(env, page, 0);
-                                        cl_page_free(env, page);
-                                        page = err;
-                                        break;
-                                }
-                        }
-                }
-                if (err == NULL) {
+               lu_ref_init(&page->cp_reference);
+               head = o->co_lu.lo_header;
+               cfs_list_for_each_entry(o, &head->loh_layers,
+                                       co_lu.lo_linkage) {
+                       if (o->co_ops->coo_page_init != NULL) {
+                               result = o->co_ops->coo_page_init(env, o,
+                                                                 page, vmpage);
+                               if (result != 0) {
+                                       cl_page_delete0(env, page, 0);
+                                       cl_page_free(env, page);
+                                       page = ERR_PTR(result);
+                                       break;
+                               }
+                       }
+               }
+               if (result == 0) {
                        CS_PAGE_INC(o, total);
                        CS_PAGE_INC(o, create);
                        CS_PAGESTATE_DEC(o, CPS_CACHED);
-                        result = 0;
-                }
-        } else
-                page = ERR_PTR(-ENOMEM);
-        *out = page;
-        RETURN(result);
+               }
+       } else {
+               page = ERR_PTR(-ENOMEM);
+       }
+       RETURN(page);
 }
 
 /**
@@ -440,8 +425,8 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         }
 
         /* allocate and initialize cl_page */
-        err = cl_page_alloc(env, o, idx, vmpage, type, &page);
-        if (err != 0)
+        page = cl_page_alloc(env, o, idx, vmpage, type);
+        if (IS_ERR(page))
                 RETURN(page);
 
         if (type == CPT_TRANSIENT) {
@@ -1620,10 +1605,9 @@ EXPORT_SYMBOL(cl_page_slice_add);
 
 int  cl_page_init(void)
 {
-        return lu_kmem_init(cl_page_caches);
+        return 0;
 }
 
 void cl_page_fini(void)
 {
-        lu_kmem_fini(cl_page_caches);
 }
index b15483f..a8f5b0b 100644 (file)
@@ -222,7 +222,6 @@ struct echo_session_info {
         unsigned long dummy;
 };
 
-static cfs_mem_cache_t *echo_page_kmem;
 static cfs_mem_cache_t *echo_lock_kmem;
 static cfs_mem_cache_t *echo_object_kmem;
 static cfs_mem_cache_t *echo_thread_kmem;
@@ -231,11 +230,6 @@ static cfs_mem_cache_t *echo_session_kmem;
 
 static struct lu_kmem_descr echo_caches[] = {
         {
-                .ckd_cache = &echo_page_kmem,
-                .ckd_name  = "echo_page_kmem",
-                .ckd_size  = sizeof (struct echo_page)
-        },
-        {
                 .ckd_cache = &echo_lock_kmem,
                 .ckd_name  = "echo_lock_kmem",
                 .ckd_size  = sizeof (struct echo_lock)
@@ -334,7 +328,6 @@ static void echo_page_fini(const struct lu_env *env,
 
         cfs_atomic_dec(&eco->eo_npages);
         page_cache_release(vmpage);
-        OBD_SLAB_FREE_PTR(ep, echo_page_kmem);
         EXIT;
 }
 
@@ -422,23 +415,19 @@ static struct cl_lock_operations echo_lock_ops = {
  *
  * @{
  */
-static struct cl_page *echo_page_init(const struct lu_env *env,
-                                      struct cl_object *obj,
-                                      struct cl_page *page, cfs_page_t *vmpage)
+static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
+                       struct cl_page *page, cfs_page_t *vmpage)
 {
-        struct echo_page *ep;
+        struct echo_page *ep = cl_object_page_slice(obj, page);
+       struct echo_object *eco = cl2echo_obj(obj);
         ENTRY;
 
-        OBD_SLAB_ALLOC_PTR_GFP(ep, echo_page_kmem, CFS_ALLOC_IO);
-        if (ep != NULL) {
-                struct echo_object *eco = cl2echo_obj(obj);
-                ep->ep_vmpage = vmpage;
-                page_cache_get(vmpage);
-               mutex_init(&ep->ep_lock);
-                cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
-                cfs_atomic_inc(&eco->eo_npages);
-        }
-        RETURN(ERR_PTR(ep ? 0 : -ENOMEM));
+       ep->ep_vmpage = vmpage;
+       page_cache_get(vmpage);
+       mutex_init(&ep->ep_lock);
+       cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+       cfs_atomic_inc(&eco->eo_npages);
+        RETURN(0);
 }
 
 static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
@@ -518,6 +507,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
 
         eco->eo_dev = ed;
         cfs_atomic_set(&eco->eo_npages, 0);
+       cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
 
        spin_lock(&ec->ec_lock);
        cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
index 2a7e22a..de2b2df 100644 (file)
@@ -403,7 +403,6 @@ struct osc_page {
         struct cl_lock       *ops_lock;
 };
 
-extern cfs_mem_cache_t *osc_page_kmem;
 extern cfs_mem_cache_t *osc_lock_kmem;
 extern cfs_mem_cache_t *osc_object_kmem;
 extern cfs_mem_cache_t *osc_thread_kmem;
@@ -427,9 +426,8 @@ int osc_req_init (const struct lu_env *env, struct cl_device *dev,
 struct lu_object *osc_object_alloc(const struct lu_env *env,
                                    const struct lu_object_header *hdr,
                                    struct lu_device *dev);
-struct cl_page   *osc_page_init   (const struct lu_env *env,
-                                   struct cl_object *obj,
-                                   struct cl_page *page, cfs_page_t *vmpage);
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+                 struct cl_page *page, cfs_page_t *vmpage);
 
 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
                         struct ldlm_res_id *resname);
index a76edd3..caccc7a 100644 (file)
@@ -49,7 +49,6 @@
  * @{ 
  */
 
-cfs_mem_cache_t *osc_page_kmem;
 cfs_mem_cache_t *osc_lock_kmem;
 cfs_mem_cache_t *osc_object_kmem;
 cfs_mem_cache_t *osc_thread_kmem;
@@ -60,11 +59,6 @@ cfs_mem_cache_t *osc_quota_kmem;
 
 struct lu_kmem_descr osc_caches[] = {
         {
-                .ckd_cache = &osc_page_kmem,
-                .ckd_name  = "osc_page_kmem",
-                .ckd_size  = sizeof (struct osc_page)
-        },
-        {
                 .ckd_cache = &osc_lock_kmem,
                 .ckd_name  = "osc_lock_kmem",
                 .ckd_size  = sizeof (struct osc_lock)
index 72ba12b..2bf87db 100644 (file)
@@ -98,6 +98,8 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
        cfs_atomic_set(&osc->oo_nr_writes, 0);
        spin_lock_init(&osc->oo_lock);
 
+       cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
+
        return 0;
 }
 
index 31d59a0..75e6108 100644 (file)
@@ -168,7 +168,6 @@ static void osc_page_fini(const struct lu_env *env,
         struct osc_page *opg = cl2osc_page(slice);
         CDEBUG(D_TRACE, "%p\n", opg);
         LASSERT(opg->ops_lock == NULL);
-        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
 }
 
 static void osc_page_transfer_get(struct osc_page *opg, const char *label)
@@ -508,46 +507,41 @@ static const struct cl_page_operations osc_page_ops = {
        .cpo_flush          = osc_page_flush
 };
 
-struct cl_page *osc_page_init(const struct lu_env *env,
-                              struct cl_object *obj,
-                              struct cl_page *page, cfs_page_t *vmpage)
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+               struct cl_page *page, cfs_page_t *vmpage)
 {
-        struct osc_object *osc = cl2osc(obj);
-        struct osc_page   *opg;
-        int result;
+       struct osc_object *osc = cl2osc(obj);
+       struct osc_page   *opg = cl_object_page_slice(obj, page);
+       int result;
 
-        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
-        if (opg != NULL) {
-                opg->ops_from = 0;
-                opg->ops_to   = CFS_PAGE_SIZE;
-
-               result = osc_prep_async_page(osc, opg, vmpage,
-                                            cl_offset(obj, page->cp_index));
-                if (result == 0) {
-                        struct osc_io *oio = osc_env_io(env);
-                        opg->ops_srvlock = osc_io_srvlock(oio);
-                        cl_page_slice_add(page, &opg->ops_cl, obj,
-                                          &osc_page_ops);
-                }
-                /*
-                 * Cannot assert osc_page_protected() here as read-ahead
-                 * creates temporary pages outside of a lock.
-                 */
+       opg->ops_from = 0;
+       opg->ops_to   = CFS_PAGE_SIZE;
+
+       result = osc_prep_async_page(osc, opg, vmpage,
+                                       cl_offset(obj, page->cp_index));
+       if (result == 0) {
+               struct osc_io *oio = osc_env_io(env);
+               opg->ops_srvlock = osc_io_srvlock(oio);
+               cl_page_slice_add(page, &opg->ops_cl, obj,
+                               &osc_page_ops);
+       }
+       /*
+        * Cannot assert osc_page_protected() here as read-ahead
+        * creates temporary pages outside of a lock.
+        */
 #ifdef INVARIANT_CHECK
-                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
+       opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
 #endif
-               /* ops_inflight and ops_lru are the same field, but it doesn't
-                * hurt to initialize it twice :-) */
-                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
-               CFS_INIT_LIST_HEAD(&opg->ops_lru);
-       } else
-               result = -ENOMEM;
+       /* ops_inflight and ops_lru are the same field, but it doesn't
+        * hurt to initialize it twice :-) */
+       CFS_INIT_LIST_HEAD(&opg->ops_inflight);
+       CFS_INIT_LIST_HEAD(&opg->ops_lru);
 
        /* reserve an LRU space for this page */
        if (page->cp_type == CPT_CACHEABLE && result == 0)
                result = osc_lru_reserve(env, osc, opg);
 
-       return ERR_PTR(result);
+       return result;
 }
 
 /**