X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_page.c;h=b7dfab1e1df8748896db632a418842f2eea7e4d3;hb=bc71055256db623ba2062f8f299d8b603d89e0d9;hp=91cb7bccdc2df87defb4a48becf490536b21d5de;hpb=33257361eef3aeb09eee0d10026be17b6f3f5bcb;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 91cb7bc..b7dfab1 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -51,19 +51,6 @@
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                             int radix);
 
-static cfs_mem_cache_t *cl_page_kmem = NULL;
-
-static struct lu_kmem_descr cl_page_caches[] = {
-        {
-                .ckd_cache = &cl_page_kmem,
-                .ckd_name  = "cl_page_kmem",
-                .ckd_size  = sizeof (struct cl_page)
-        },
-        {
-                .ckd_cache = NULL
-        }
-};
-
 #ifdef LIBCFS_DEBUG
 # define PASSERT(env, page, expr)                                      \
   do {                                                                 \
@@ -77,7 +64,7 @@ static struct lu_kmem_descr cl_page_caches[] = {
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
 #endif /* !LIBCFS_DEBUG */
 
-#ifdef INVARIANT_CHECK
+#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
 # define PINVRNT(env, page, expr)                                      \
   do {                                                                 \
           if (unlikely(!(expr))) {                                     \
@@ -85,10 +72,10 @@ static struct lu_kmem_descr cl_page_caches[] = {
                   LINVRNT(0);                                          \
           }                                                            \
   } while (0)
-#else /* !INVARIANT_CHECK */
+#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
 # define PINVRNT(env, page, exp) \
-        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
-#endif /* !INVARIANT_CHECK */
+	((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
+#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
 /* Disable page statistic by default due to huge performance penalty.
  */
 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
@@ -289,6 +276,7 @@ EXPORT_SYMBOL(cl_page_gang_lookup);
 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 {
 	struct cl_object *obj = page->cp_obj;
+	int pagesize = cl_object_header(obj)->coh_page_bufsize;
 
 	PASSERT(env, page, cfs_list_empty(&page->cp_batch));
 	PASSERT(env, page, page->cp_owner == NULL);
@@ -308,10 +296,10 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 	}
 	CS_PAGE_DEC(obj, total);
 	CS_PAGESTATE_DEC(obj, page->cp_state);
-	lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
+	lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
 	cl_object_put(env, obj);
 	lu_ref_fini(&page->cp_reference);
-	OBD_SLAB_FREE_PTR(page, cl_page_kmem);
+	OBD_FREE(page, pagesize);
 	EXIT;
 }
 
@@ -326,58 +314,56 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
 	*(enum cl_page_state *)&page->cp_state = state;
 }
 
-static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
-                         pgoff_t ind, struct page *vmpage,
-                         enum cl_page_type type, struct cl_page **out)
+static struct cl_page *cl_page_alloc(const struct lu_env *env,
+		struct cl_object *o, pgoff_t ind, struct page *vmpage,
+		enum cl_page_type type)
 {
-        struct cl_page          *page;
-        struct cl_page          *err  = NULL;
-        struct lu_object_header *head;
-        int                      result;
+	struct cl_page          *page;
+	struct lu_object_header *head;
 
-        ENTRY;
-        result = +1;
-        OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
-        if (page != NULL) {
-                cfs_atomic_set(&page->cp_ref, 1);
+	ENTRY;
+	OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
+		      __GFP_IO);
+	if (page != NULL) {
+		int result = 0;
+		cfs_atomic_set(&page->cp_ref, 1);
 		if (type == CPT_CACHEABLE) /* for radix tree */
 			cfs_atomic_inc(&page->cp_ref);
-                page->cp_obj = o;
-                cl_object_get(o);
-                page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
-                                                     "cl_page", page);
-                page->cp_index = ind;
-                cl_page_state_set_trust(page, CPS_CACHED);
+		page->cp_obj = o;
+		cl_object_get(o);
+		lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
+				     page);
+		page->cp_index = ind;
+		cl_page_state_set_trust(page, CPS_CACHED);
 		page->cp_type = type;
 		CFS_INIT_LIST_HEAD(&page->cp_layers);
 		CFS_INIT_LIST_HEAD(&page->cp_batch);
 		CFS_INIT_LIST_HEAD(&page->cp_flight);
 		mutex_init(&page->cp_mutex);
-                lu_ref_init(&page->cp_reference);
-                head = o->co_lu.lo_header;
-                cfs_list_for_each_entry(o, &head->loh_layers,
-                                        co_lu.lo_linkage) {
-                        if (o->co_ops->coo_page_init != NULL) {
-                                err = o->co_ops->coo_page_init(env, o,
-                                                               page, vmpage);
-                                if (err != NULL) {
-                                        cl_page_delete0(env, page, 0);
-                                        cl_page_free(env, page);
-                                        page = err;
-                                        break;
-                                }
-                        }
-                }
-                if (err == NULL) {
+		lu_ref_init(&page->cp_reference);
+		head = o->co_lu.lo_header;
+		cfs_list_for_each_entry(o, &head->loh_layers,
+					co_lu.lo_linkage) {
+			if (o->co_ops->coo_page_init != NULL) {
+				result = o->co_ops->coo_page_init(env, o,
+								  page, vmpage);
+				if (result != 0) {
+					cl_page_delete0(env, page, 0);
+					cl_page_free(env, page);
+					page = ERR_PTR(result);
+					break;
+				}
+			}
+		}
+		if (result == 0) {
 			CS_PAGE_INC(o, total);
 			CS_PAGE_INC(o, create);
 			CS_PAGESTATE_DEC(o, CPS_CACHED);
-                        result = 0;
-                }
-        } else
-                page = ERR_PTR(-ENOMEM);
-        *out = page;
-        RETURN(result);
+		}
+	} else {
+		page = ERR_PTR(-ENOMEM);
+	}
+	RETURN(page);
 }
 
 /**
@@ -440,8 +426,8 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
 	}
 
 	/* allocate and initialize cl_page */
-	err = cl_page_alloc(env, o, idx, vmpage, type, &page);
-	if (err != 0)
+	page = cl_page_alloc(env, o, idx, vmpage, type);
+	if (IS_ERR(page))
 		RETURN(page);
 
 	if (type == CPT_TRANSIENT) {
@@ -669,7 +655,7 @@ EXPORT_SYMBOL(cl_page_put);
 /**
  * Returns a VM page associated with a given cl_page.
  */
-cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
+struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
 {
 	const struct cl_page_slice *slice;
 
@@ -692,7 +678,7 @@ EXPORT_SYMBOL(cl_page_vmpage);
 /**
  * Returns a cl_page associated with a VM page, and given cl_object.
  */
-struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
 {
 	struct cl_page *top;
 	struct cl_page *page;
@@ -1282,6 +1268,8 @@ int cl_page_prep(const struct lu_env *env, struct cl_io *io,
 	 * PG_writeback without risking other layers deciding to skip this
 	 * page.
 	 */
+	if (crt >= CRT_NR)
+		return -EINVAL;
 	result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
 	if (result == 0)
 		cl_page_io_start(env, pg, crt);
@@ -1327,6 +1315,8 @@ void cl_page_completion(const struct lu_env *env,
 	}
 
 	cl_page_state_set(env, pg, CPS_CACHED);
+	if (crt >= CRT_NR)
+		return;
 	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
 			       (const struct lu_env *,
 				const struct cl_page_slice *, int), ioret);
@@ -1334,8 +1324,17 @@ void cl_page_completion(const struct lu_env *env,
 		LASSERT(cl_page_is_vmlocked(env, pg));
 		LASSERT(pg->cp_sync_io == anchor);
 		pg->cp_sync_io = NULL;
+	}
+	/*
+	 * As page->cp_obj is pinned by a reference from page->cp_req, it is
+	 * safe to call cl_page_put() without risking object destruction in a
+	 * non-blocking context.
+	 */
+	cl_page_put(env, pg);
+
+	if (anchor)
 		cl_sync_io_note(anchor, ioret);
-	}
+
 	EXIT;
 }
 EXPORT_SYMBOL(cl_page_completion);
@@ -1357,6 +1356,8 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
 	PINVRNT(env, pg, crt < CRT_NR);
 
 	ENTRY;
+	if (crt >= CRT_NR)
+		RETURN(-EINVAL);
 	result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
 				(const struct lu_env *,
 				 const struct cl_page_slice *));
@@ -1393,6 +1394,9 @@ int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
 
 	ENTRY;
 
+	if (crt >= CRT_NR)
+		RETURN(-EINVAL);
+
 	cfs_list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
 		if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
 			continue;
@@ -1571,10 +1575,7 @@ EXPORT_SYMBOL(cl_page_cancel);
  */
 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
 {
-        /*
-         * XXX for now.
-         */
-        return (loff_t)idx << CFS_PAGE_SHIFT;
+	return (loff_t)idx << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_offset);
 
@@ -1583,16 +1584,13 @@ EXPORT_SYMBOL(cl_offset);
  */
 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 {
-        /*
-         * XXX for now.
-         */
-        return offset >> CFS_PAGE_SHIFT;
+	return offset >> PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_index);
 
 int cl_page_size(const struct cl_object *obj)
 {
-        return 1 << CFS_PAGE_SHIFT;
+	return 1 << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
 
@@ -1620,10 +1618,9 @@ EXPORT_SYMBOL(cl_page_slice_add);
 
 int cl_page_init(void)
 {
-        return lu_kmem_init(cl_page_caches);
+	return 0;
 }
 
 void cl_page_fini(void)
 {
-        lu_kmem_fini(cl_page_caches);
 }
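
Note on the API change above: cl_page_alloc() now returns either a valid cl_page pointer or an errno encoded with ERR_PTR(), which cl_page_find0() tests with IS_ERR(), replacing the old out-parameter-plus-return-code pair. Below is a minimal, self-contained userspace sketch of that idiom; it is not part of the patch. ERR_PTR/IS_ERR/PTR_ERR here are simplified stand-ins for the kernel's <linux/err.h>, and toy_page/toy_page_alloc are hypothetical names used only for illustration.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in the top page of the pointer range. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the errno from an error pointer. */
static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* True when the pointer value lies in the reserved error range. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct toy_page {
	int index;
};

/*
 * Allocate and initialize a page-like object.  As in the patched
 * cl_page_alloc(), a single return value carries either a valid
 * pointer or an encoded errno; there is no separate status
 * out-parameter to keep in sync with the pointer.
 */
static struct toy_page *toy_page_alloc(int index)
{
	struct toy_page *page;

	page = malloc(sizeof(*page));
	if (page == NULL)
		return ERR_PTR(-ENOMEM);

	if (index < 0) {		/* stand-in for a coo_page_init failure */
		free(page);
		return ERR_PTR(-EINVAL);
	}

	page->index = index;
	return page;
}

int main(void)
{
	/* Caller side, mirroring the new IS_ERR() check in cl_page_find0(). */
	struct toy_page *page = toy_page_alloc(-1);

	if (IS_ERR(page)) {
		printf("allocation failed: %ld\n", PTR_ERR(page));
		return 1;
	}

	free(page);
	return 0;
}

One pointer that both reports failure and carries the result removes the failure mode the old signature allowed, where the status code and the out-parameter could disagree.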