#define __swab64s(x) do { *(x) = __swab64(*(x)); } while (0)
#endif
+#if !defined(ALIGN)
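+/* round x up to the next multiple of a; a must be a power of two */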
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1)
+#endif
# ifndef THREAD_SIZE /* x86_64 linux has THREAD_SIZE in userspace */
# define CFS_THREAD_SIZE 8192
struct lu_object co_lu;
/** per-object-layer operations */
const struct cl_object_operations *co_ops;
+ /** offset of page slice in cl_page buffer */
+ int co_slice_off;
};
/**
- * \retval valid-pointer pointer to already existing referenced page
- * to be used instead of newly created.
+ * \retval 0 on success, or a negative errno on failure.
*/
- struct cl_page *(*coo_page_init)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- cfs_page_t *vmpage);
+ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
*/
spinlock_t coh_attr_guard;
/**
+ * Size of cl_page + page slices
+ */
+ unsigned short coh_page_bufsize;
+ /**
* Number of objects above this one: 0 for a top-object, 1 for its
* sub-object, etc.
*/
- unsigned coh_nesting;
+ unsigned char coh_nesting;
};
/**
return cl_object_header(o0) == cl_object_header(o1);
}
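+/**
+ * Reserve space for this layer's page slice in the cl_page buffer:
+ * record where the slice starts and grow the per-object buffer size
+ * (rounded up to an 8-byte boundary). Called from each layer's object
+ * initializer, before any cl_page is allocated for the object.
+ */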
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+ clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+ cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+}
+
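+/**
+ * Return the address of this layer's page slice inside \a page, using
+ * the offset recorded by cl_object_page_init().
+ */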
+static inline void *cl_object_page_slice(struct cl_object *clob,
+ struct cl_page *page)
+{
+ return (void *)((char *)page + clob->co_slice_off);
+}
+
/** @} cl_object */
/** \defgroup cl_page cl_page
{
vob->cob_inode = conf->coc_inode;
vob->cob_transient_pages = 0;
+ cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
return 0;
}
static int slp_type_init (struct lu_device_type *t);
static void slp_type_fini (struct lu_device_type *t);
-static struct cl_page * slp_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
static int slp_attr_get (const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
*
*/
-static struct cl_page *slp_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct ccc_page *cpg;
- int result;
+ struct ccc_page *cpg = cl_object_page_slice(obj, page);
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- OBD_ALLOC_PTR(cpg);
- if (cpg != NULL) {
- cpg->cpg_page = vmpage;
+ cpg->cpg_page = vmpage;
- if (page->cp_type == CPT_CACHEABLE) {
- LBUG();
- } else {
- struct ccc_object *clobj = cl2ccc(obj);
+ if (page->cp_type == CPT_CACHEABLE) {
+ LBUG();
+ } else {
+ struct ccc_object *clobj = cl2ccc(obj);
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &slp_transient_page_ops);
- clobj->cob_transient_pages++;
- }
- result = 0;
- } else
- result = -ENOMEM;
- return ERR_PTR(result);
+ cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ &slp_transient_page_ops);
+ clobj->cob_transient_pages++;
+ }
+
+ return 0;
}
static int slp_io_init(const struct lu_env *env, struct cl_object *obj,
* Unfortunately this is NOT easy to fix.
* Thoughts on solution:
* 0. Define a reserved pool for cl_pages, which could be a list of
- * pre-allocated cl_pages from cl_page_kmem;
+ * pre-allocated cl_pages;
* 1. Define a new operation in cl_object_operations{}, says clo_depth,
* which measures how many layers for this lustre object. Generally
* speaking, the depth would be 2, one for llite, and one for lovsub.
* "llite_" (var. "ll_") prefix.
*/
-cfs_mem_cache_t *vvp_page_kmem;
cfs_mem_cache_t *vvp_thread_kmem;
static cfs_mem_cache_t *vvp_session_kmem;
static struct lu_kmem_descr vvp_caches[] = {
{
- .ckd_cache = &vvp_page_kmem,
- .ckd_name = "vvp_page_kmem",
- .ckd_size = sizeof (struct ccc_page)
- },
- {
.ckd_cache = &vvp_thread_kmem,
.ckd_name = "vvp_thread_kmem",
.ckd_size = sizeof (struct vvp_thread_info),
int vvp_lock_init (const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
-struct cl_page *vvp_page_init (const struct lu_env *env,
+int vvp_page_init (const struct lu_env *env,
struct cl_object *obj,
struct cl_page *page, cfs_page_t *vmpage);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
struct ccc_object *cl_inode2ccc(struct inode *inode);
-extern cfs_mem_cache_t *vvp_page_kmem;
extern cfs_mem_cache_t *vvp_thread_kmem;
#endif /* VVP_INTERNAL_H */
LASSERT(vmpage != NULL);
page_cache_release(vmpage);
- OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}
static void vvp_page_fini(const struct lu_env *env,
}
};
-struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
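+	/* the slice is embedded in the cl_page buffer; no separate allocation */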
- struct ccc_page *cpg;
- int result;
+ struct ccc_page *cpg = cl_object_page_slice(obj, page);
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
- if (cpg != NULL) {
- cpg->cpg_page = vmpage;
- page_cache_get(vmpage);
-
- CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
- if (page->cp_type == CPT_CACHEABLE) {
- SetPagePrivate(vmpage);
- vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_page_ops);
- } else {
- struct ccc_object *clobj = cl2ccc(obj);
-
- LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_transient_page_ops);
- clobj->cob_transient_pages++;
- }
- result = 0;
- } else
- result = -ENOMEM;
- return ERR_PTR(result);
+ cpg->cpg_page = vmpage;
+ page_cache_get(vmpage);
+
+ CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+ if (page->cp_type == CPT_CACHEABLE) {
+ SetPagePrivate(vmpage);
+ vmpage->private = (unsigned long)page;
+ cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ &vvp_page_ops);
+ } else {
+ struct ccc_object *clobj = cl2ccc(obj);
+
+ LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+ cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ &vvp_transient_page_ops);
+ clobj->cob_transient_pages++;
+ }
+ return 0;
}
extern struct lu_context_key lov_key;
extern struct lu_context_key lov_session_key;
-extern cfs_mem_cache_t *lov_page_kmem;
extern cfs_mem_cache_t *lov_lock_kmem;
extern cfs_mem_cache_t *lov_object_kmem;
extern cfs_mem_cache_t *lov_thread_kmem;
extern cfs_mem_cache_t *lov_session_kmem;
extern cfs_mem_cache_t *lov_req_kmem;
-extern cfs_mem_cache_t *lovsub_page_kmem;
extern cfs_mem_cache_t *lovsub_lock_kmem;
extern cfs_mem_cache_t *lovsub_object_kmem;
extern cfs_mem_cache_t *lovsub_req_kmem;
const struct cl_lock_descr *d, int idx);
-struct cl_page *lov_page_init (const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page *lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, cfs_page_t *vmpage);
+int lov_page_init (const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, cfs_page_t *vmpage);
+int lovsub_page_init (const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page *lov_page_init_empty(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page *lov_page_init_raid0(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+int lov_page_init_empty (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
+int lov_page_init_raid0 (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
struct lu_object *lov_object_alloc (const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
#include "lov_cl_internal.h"
-cfs_mem_cache_t *lov_page_kmem;
cfs_mem_cache_t *lov_lock_kmem;
cfs_mem_cache_t *lov_object_kmem;
cfs_mem_cache_t *lov_thread_kmem;
cfs_mem_cache_t *lov_session_kmem;
cfs_mem_cache_t *lov_req_kmem;
-cfs_mem_cache_t *lovsub_page_kmem;
cfs_mem_cache_t *lovsub_lock_kmem;
cfs_mem_cache_t *lovsub_object_kmem;
cfs_mem_cache_t *lovsub_req_kmem;
struct lu_kmem_descr lov_caches[] = {
{
- .ckd_cache = &lov_page_kmem,
- .ckd_name = "lov_page_kmem",
- .ckd_size = sizeof (struct lov_page)
- },
- {
.ckd_cache = &lov_lock_kmem,
.ckd_name = "lov_lock_kmem",
.ckd_size = sizeof (struct lov_lock)
.ckd_size = sizeof (struct lov_req)
},
{
- .ckd_cache = &lovsub_page_kmem,
- .ckd_name = "lovsub_page_kmem",
- .ckd_size = sizeof (struct lovsub_page)
- },
- {
.ckd_cache = &lovsub_lock_kmem,
.ckd_name = "lovsub_lock_kmem",
.ckd_size = sizeof (struct lovsub_lock)
union lov_layout_state *state);
int (*llo_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
- struct cl_page *(*llo_page_init)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- cfs_page_t *vmpage);
+ int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
int (*llo_lock_init)(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
init_rwsem(&lov->lo_type_guard);
cfs_waitq_init(&lov->lo_waitq);
+ cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
+
/* no locking is necessary, as object is being created */
lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
ops = &lov_dispatch[lov->lo_type];
return LOV_2DISPATCH(lu2lov(o), llo_print, env, cookie, p, o);
}
-struct cl_page *lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+int lov_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
llo_page_init, env, obj, page, vmpage);
static void lov_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct lov_page *lp = cl2lov_page(slice);
struct cl_page *sub = lov_sub_page(slice);
LINVRNT(lov_page_invariant(slice));
slice->cpl_page->cp_child = NULL;
cl_page_put(env, sub);
}
- OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
EXIT;
}
static void lov_empty_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct lov_page *lp = cl2lov_page(slice);
-
LASSERT(slice->cpl_page->cp_child == NULL);
- ENTRY;
- OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
- EXIT;
}
-struct cl_page *lov_page_init_raid0(const struct lu_env *env,
- struct cl_object *obj, struct cl_page *page,
- cfs_page_t *vmpage)
+int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
struct cl_page *subpage;
struct cl_object *subobj;
struct lov_io_sub *sub;
- struct lov_page *lpg;
- struct cl_page *result;
+ struct lov_page *lpg = cl_object_page_slice(obj, page);
loff_t offset;
obd_off suboff;
int stripe;
&suboff);
LASSERT(rc == 0);
- OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
- if (lpg == NULL)
- GOTO(out, result = ERR_PTR(-ENOMEM));
-
lpg->lps_invalid = 1;
cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
sub = lov_sub_get(env, lio, stripe);
if (IS_ERR(sub))
- GOTO(out, result = (struct cl_page *)sub);
+ GOTO(out, rc = PTR_ERR(sub));
subobj = lovsub2cl(r0->lo_sub[stripe]);
subpage = cl_page_find_sub(sub->sub_env, subobj,
cl_index(subobj, suboff), vmpage, page);
lov_sub_put(sub);
if (IS_ERR(subpage))
- GOTO(out, result = subpage);
+ GOTO(out, rc = PTR_ERR(subpage));
if (likely(subpage->cp_parent == page)) {
lu_ref_add(&subpage->cp_reference, "lov", page);
lpg->lps_invalid = 0;
- result = NULL;
+ rc = 0;
} else {
CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
EXIT;
out:
- return(result);
+ return rc;
}
.cpo_print = lov_page_print
};
-struct cl_page *lov_page_init_empty(const struct lu_env *env,
- struct cl_object *obj, struct cl_page *page,
- cfs_page_t *vmpage)
+int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct lov_page *lpg;
- int result = -ENOMEM;
+ struct lov_page *lpg = cl_object_page_slice(obj, page);
+ void *addr;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
- if (lpg != NULL) {
- void *addr;
- cl_page_slice_add(page, &lpg->lps_cl,
- obj, &lov_empty_page_ops);
- addr = cfs_kmap(vmpage);
- memset(addr, 0, cl_page_size(obj));
- cfs_kunmap(vmpage);
- cl_page_export(env, page, 1);
- result = 0;
- }
- RETURN(ERR_PTR(result));
+ cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
+ addr = cfs_kmap(vmpage);
+ memset(addr, 0, cl_page_size(obj));
+ cfs_kunmap(vmpage);
+ cl_page_export(env, page, 1);
+ RETURN(0);
}
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
if (below != NULL) {
lu_object_add(obj, below);
+ cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
result = 0;
} else
result = -ENOMEM;
static void lovsub_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct lovsub_page *lsb = cl2lovsub_page(slice);
- ENTRY;
- OBD_SLAB_FREE_PTR(lsb, lovsub_page_kmem);
- EXIT;
}
static const struct cl_page_operations lovsub_page_ops = {
.cpo_fini = lovsub_page_fini
};
-struct cl_page *lovsub_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *unused)
+int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *unused)
{
- struct lovsub_page *lsb;
- int result;
-
+ struct lovsub_page *lsb = cl_object_page_slice(obj, page);
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lsb, lovsub_page_kmem, CFS_ALLOC_IO);
- if (lsb != NULL) {
- cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
- result = 0;
- } else
- result = -ENOMEM;
- RETURN(ERR_PTR(result));
+
+ cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
+ RETURN(0);
}
/** @} lov */
/* XXX hard coded GFP_* mask. */
INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
CFS_INIT_LIST_HEAD(&h->coh_locks);
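+	/* page slices are laid out after struct cl_page in one buffer */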
+ h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
}
RETURN(result);
}
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
int radix);
-static cfs_mem_cache_t *cl_page_kmem = NULL;
-
-static struct lu_kmem_descr cl_page_caches[] = {
- {
- .ckd_cache = &cl_page_kmem,
- .ckd_name = "cl_page_kmem",
- .ckd_size = sizeof (struct cl_page)
- },
- {
- .ckd_cache = NULL
- }
-};
-
#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr) \
do { \
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
struct cl_object *obj = page->cp_obj;
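+	/* coh_page_bufsize is fixed once the object is set up, so it still
+	 * matches the size used by cl_page_alloc() */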
+ int pagesize = cl_object_header(obj)->coh_page_bufsize;
PASSERT(env, page, cfs_list_empty(&page->cp_batch));
PASSERT(env, page, page->cp_owner == NULL);
lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
cl_object_put(env, obj);
lu_ref_fini(&page->cp_reference);
- OBD_SLAB_FREE_PTR(page, cl_page_kmem);
+ OBD_FREE(page, pagesize);
EXIT;
}
*(enum cl_page_state *)&page->cp_state = state;
}
-static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
- pgoff_t ind, struct page *vmpage,
- enum cl_page_type type, struct cl_page **out)
+static struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind, struct page *vmpage,
+ enum cl_page_type type)
{
- struct cl_page *page;
- struct cl_page *err = NULL;
- struct lu_object_header *head;
- int result;
+ struct cl_page *page;
+ struct lu_object_header *head;
- ENTRY;
- result = +1;
- OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
- if (page != NULL) {
- cfs_atomic_set(&page->cp_ref, 1);
+ ENTRY;
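+	/* a single buffer holds struct cl_page followed by all layer slices */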
+ OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
+ CFS_ALLOC_IO);
+ if (page != NULL) {
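+		/* coo_page_init is optional, so result must start at 0 */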
+		int result = 0;
+ cfs_atomic_set(&page->cp_ref, 1);
if (type == CPT_CACHEABLE) /* for radix tree */
cfs_atomic_inc(&page->cp_ref);
- page->cp_obj = o;
- cl_object_get(o);
- page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
- "cl_page", page);
- page->cp_index = ind;
- cl_page_state_set_trust(page, CPS_CACHED);
+ page->cp_obj = o;
+ cl_object_get(o);
+		page->cp_obj_ref = lu_object_ref_add(&o->co_lu, "cl_page", page);
+ page->cp_index = ind;
+ cl_page_state_set_trust(page, CPS_CACHED);
page->cp_type = type;
CFS_INIT_LIST_HEAD(&page->cp_layers);
CFS_INIT_LIST_HEAD(&page->cp_batch);
CFS_INIT_LIST_HEAD(&page->cp_flight);
mutex_init(&page->cp_mutex);
- lu_ref_init(&page->cp_reference);
- head = o->co_lu.lo_header;
- cfs_list_for_each_entry(o, &head->loh_layers,
- co_lu.lo_linkage) {
- if (o->co_ops->coo_page_init != NULL) {
- err = o->co_ops->coo_page_init(env, o,
- page, vmpage);
- if (err != NULL) {
- cl_page_delete0(env, page, 0);
- cl_page_free(env, page);
- page = err;
- break;
- }
- }
- }
- if (err == NULL) {
+ lu_ref_init(&page->cp_reference);
+ head = o->co_lu.lo_header;
+ cfs_list_for_each_entry(o, &head->loh_layers,
+ co_lu.lo_linkage) {
+ if (o->co_ops->coo_page_init != NULL) {
+ result = o->co_ops->coo_page_init(env, o,
+ page, vmpage);
+ if (result != 0) {
+ cl_page_delete0(env, page, 0);
+ cl_page_free(env, page);
+ page = ERR_PTR(result);
+ break;
+ }
+ }
+ }
+ if (result == 0) {
CS_PAGE_INC(o, total);
CS_PAGE_INC(o, create);
CS_PAGESTATE_DEC(o, CPS_CACHED);
- result = 0;
- }
- } else
- page = ERR_PTR(-ENOMEM);
- *out = page;
- RETURN(result);
+ }
+ } else {
+ page = ERR_PTR(-ENOMEM);
+ }
+ RETURN(page);
}
/**
}
/* allocate and initialize cl_page */
- err = cl_page_alloc(env, o, idx, vmpage, type, &page);
- if (err != 0)
+ page = cl_page_alloc(env, o, idx, vmpage, type);
+ if (IS_ERR(page))
RETURN(page);
if (type == CPT_TRANSIENT) {
int cl_page_init(void)
{
- return lu_kmem_init(cl_page_caches);
+ return 0;
}
void cl_page_fini(void)
{
- lu_kmem_fini(cl_page_caches);
}
unsigned long dummy;
};
-static cfs_mem_cache_t *echo_page_kmem;
static cfs_mem_cache_t *echo_lock_kmem;
static cfs_mem_cache_t *echo_object_kmem;
static cfs_mem_cache_t *echo_thread_kmem;
static struct lu_kmem_descr echo_caches[] = {
{
- .ckd_cache = &echo_page_kmem,
- .ckd_name = "echo_page_kmem",
- .ckd_size = sizeof (struct echo_page)
- },
- {
.ckd_cache = &echo_lock_kmem,
.ckd_name = "echo_lock_kmem",
.ckd_size = sizeof (struct echo_lock)
cfs_atomic_dec(&eco->eo_npages);
page_cache_release(vmpage);
- OBD_SLAB_FREE_PTR(ep, echo_page_kmem);
EXIT;
}
*
* @{
*/
-static struct cl_page *echo_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct echo_page *ep;
+ struct echo_page *ep = cl_object_page_slice(obj, page);
+ struct echo_object *eco = cl2echo_obj(obj);
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(ep, echo_page_kmem, CFS_ALLOC_IO);
- if (ep != NULL) {
- struct echo_object *eco = cl2echo_obj(obj);
- ep->ep_vmpage = vmpage;
- page_cache_get(vmpage);
- mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
- cfs_atomic_inc(&eco->eo_npages);
- }
- RETURN(ERR_PTR(ep ? 0 : -ENOMEM));
+ ep->ep_vmpage = vmpage;
+ page_cache_get(vmpage);
+ mutex_init(&ep->ep_lock);
+ cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+ cfs_atomic_inc(&eco->eo_npages);
+ RETURN(0);
}
static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
eco->eo_dev = ed;
cfs_atomic_set(&eco->eo_npages, 0);
+ cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
spin_lock(&ec->ec_lock);
cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
struct cl_lock *ops_lock;
};
-extern cfs_mem_cache_t *osc_page_kmem;
extern cfs_mem_cache_t *osc_lock_kmem;
extern cfs_mem_cache_t *osc_object_kmem;
extern cfs_mem_cache_t *osc_thread_kmem;
struct lu_object *osc_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
-struct cl_page *osc_page_init (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
struct ldlm_res_id *resname);
* @{
*/
-cfs_mem_cache_t *osc_page_kmem;
cfs_mem_cache_t *osc_lock_kmem;
cfs_mem_cache_t *osc_object_kmem;
cfs_mem_cache_t *osc_thread_kmem;
struct lu_kmem_descr osc_caches[] = {
{
- .ckd_cache = &osc_page_kmem,
- .ckd_name = "osc_page_kmem",
- .ckd_size = sizeof (struct osc_page)
- },
- {
.ckd_cache = &osc_lock_kmem,
.ckd_name = "osc_lock_kmem",
.ckd_size = sizeof (struct osc_lock)
cfs_atomic_set(&osc->oo_nr_writes, 0);
spin_lock_init(&osc->oo_lock);
+ cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
+
return 0;
}
struct osc_page *opg = cl2osc_page(slice);
CDEBUG(D_TRACE, "%p\n", opg);
LASSERT(opg->ops_lock == NULL);
- OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
.cpo_flush = osc_page_flush
};
-struct cl_page *osc_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct osc_object *osc = cl2osc(obj);
- struct osc_page *opg;
- int result;
+ struct osc_object *osc = cl2osc(obj);
+ struct osc_page *opg = cl_object_page_slice(obj, page);
+ int result;
- OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
- if (opg != NULL) {
- opg->ops_from = 0;
- opg->ops_to = CFS_PAGE_SIZE;
-
- result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
- if (result == 0) {
- struct osc_io *oio = osc_env_io(env);
- opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj,
- &osc_page_ops);
- }
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
+ opg->ops_from = 0;
+ opg->ops_to = CFS_PAGE_SIZE;
+
+ result = osc_prep_async_page(osc, opg, vmpage,
+ cl_offset(obj, page->cp_index));
+ if (result == 0) {
+ struct osc_io *oio = osc_env_io(env);
+ opg->ops_srvlock = osc_io_srvlock(oio);
+ cl_page_slice_add(page, &opg->ops_cl, obj,
+ &osc_page_ops);
+ }
+ /*
+ * Cannot assert osc_page_protected() here as read-ahead
+ * creates temporary pages outside of a lock.
+ */
#ifdef INVARIANT_CHECK
- opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
+ opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
- /* ops_inflight and ops_lru are the same field, but it doesn't
- * hurt to initialize it twice :-) */
- CFS_INIT_LIST_HEAD(&opg->ops_inflight);
- CFS_INIT_LIST_HEAD(&opg->ops_lru);
- } else
- result = -ENOMEM;
+ /* ops_inflight and ops_lru are the same field, but it doesn't
+ * hurt to initialize it twice :-) */
+ CFS_INIT_LIST_HEAD(&opg->ops_inflight);
+ CFS_INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
if (page->cp_type == CPT_CACHEABLE && result == 0)
result = osc_lru_reserve(env, osc, opg);
- return ERR_PTR(result);
+ return result;
}
/**