X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_page.c;h=9e57e0910ce8e214b2268a983ca2a0c2dc464747;hb=db6613f5bed1606cc8f97b46d1b298746af03a75;hp=a9d81467552357659114e5dc5baad51b35c6f159;hpb=cc5ef6ae5412c3e94061d949ef684036eb003f27;p=fs%2Flustre-release.git diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c index a9d8146..9e57e09 100644 --- a/lustre/obdclass/cl_page.c +++ b/lustre/obdclass/cl_page.c @@ -27,7 +27,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Intel Corporation. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -64,7 +64,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp)) #endif /* !LIBCFS_DEBUG */ -#ifdef INVARIANT_CHECK +#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK # define PINVRNT(env, page, expr) \ do { \ if (unlikely(!(expr))) { \ @@ -72,10 +72,10 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, LINVRNT(0); \ } \ } while (0) -#else /* !INVARIANT_CHECK */ +#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */ # define PINVRNT(env, page, exp) \ - ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp)) -#endif /* !INVARIANT_CHECK */ + ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp)) +#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */ /* Disable page statistic by default due to huge performance penalty. */ #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING @@ -155,14 +155,14 @@ cl_page_at_trusted(const struct cl_page *page, */ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index) { - struct cl_page *page; + struct cl_page *page; - LASSERT_SPIN_LOCKED(&hdr->coh_page_guard); + LASSERT(spin_is_locked(&hdr->coh_page_guard)); - page = radix_tree_lookup(&hdr->coh_tree, index); - if (page != NULL) - cl_page_get_trust(page); - return page; + page = radix_tree_lookup(&hdr->coh_tree, index); + if (page != NULL) + cl_page_get_trust(page); + return page; } EXPORT_SYMBOL(cl_page_lookup); @@ -179,8 +179,8 @@ EXPORT_SYMBOL(cl_page_lookup); * Return at least one page in @queue unless there is no covered page. */ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io, pgoff_t start, pgoff_t end, - cl_page_gang_cb_t cb, void *cbdata) + struct cl_io *io, pgoff_t start, pgoff_t end, + cl_page_gang_cb_t cb, void *cbdata) { struct cl_object_header *hdr; struct cl_page *page; @@ -223,46 +223,46 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, */ PASSERT(env, page, slice != NULL); - page = slice->cpl_page; - /* - * Can safely call cl_page_get_trust() under - * radix-tree spin-lock. - * - * XXX not true, because @page is from object another - * than @hdr and protected by different tree lock. - */ - cl_page_get_trust(page); - lu_ref_add_atomic(&page->cp_reference, - "gang_lookup", cfs_current()); - pvec[j++] = page; - } + page = slice->cpl_page; + /* + * Can safely call cl_page_get_trust() under + * radix-tree spin-lock. + * + * XXX not true, because @page is from object another + * than @hdr and protected by different tree lock. + */ + cl_page_get_trust(page); + lu_ref_add_atomic(&page->cp_reference, + "gang_lookup", current); + pvec[j++] = page; + } - /* - * Here a delicate locking dance is performed. 
Current thread - * holds a reference to a page, but has to own it before it - * can be placed into queue. Owning implies waiting, so - * radix-tree lock is to be released. After a wait one has to - * check that pages weren't truncated (cl_page_own() returns - * error in the latter case). - */ + /* + * Here a delicate locking dance is performed. Current thread + * holds a reference to a page, but has to own it before it + * can be placed into queue. Owning implies waiting, so + * radix-tree lock is to be released. After a wait one has to + * check that pages weren't truncated (cl_page_own() returns + * error in the latter case). + */ spin_unlock(&hdr->coh_page_guard); - tree_lock = 0; - - for (i = 0; i < j; ++i) { - page = pvec[i]; - if (res == CLP_GANG_OKAY) - res = (*cb)(env, io, page, cbdata); - lu_ref_del(&page->cp_reference, - "gang_lookup", cfs_current()); - cl_page_put(env, page); - } - if (nr < CLT_PVEC_SIZE || end_of_region) - break; + tree_lock = 0; + + for (i = 0; i < j; ++i) { + page = pvec[i]; + if (res == CLP_GANG_OKAY) + res = (*cb)(env, io, page, cbdata); + lu_ref_del(&page->cp_reference, + "gang_lookup", current); + cl_page_put(env, page); + } + if (nr < CLT_PVEC_SIZE || end_of_region) + break; - if (res == CLP_GANG_OKAY && cfs_need_resched()) - res = CLP_GANG_RESCHED; - if (res != CLP_GANG_OKAY) - break; + if (res == CLP_GANG_OKAY && need_resched()) + res = CLP_GANG_RESCHED; + if (res != CLP_GANG_OKAY) + break; spin_lock(&hdr->coh_page_guard); tree_lock = 1; @@ -284,19 +284,19 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) PASSERT(env, page, page->cp_parent == NULL); PASSERT(env, page, page->cp_state == CPS_FREEING); - ENTRY; - cfs_might_sleep(); - while (!cfs_list_empty(&page->cp_layers)) { - struct cl_page_slice *slice; - - slice = cfs_list_entry(page->cp_layers.next, - struct cl_page_slice, cpl_linkage); - cfs_list_del_init(page->cp_layers.next); - slice->cpl_ops->cpo_fini(env, slice); - } + ENTRY; + might_sleep(); + while (!cfs_list_empty(&page->cp_layers)) { + struct cl_page_slice *slice; + + slice = cfs_list_entry(page->cp_layers.next, + struct cl_page_slice, cpl_linkage); + cfs_list_del_init(page->cp_layers.next); + slice->cpl_ops->cpo_fini(env, slice); + } CS_PAGE_DEC(obj, total); CS_PAGESTATE_DEC(obj, page->cp_state); - lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page); + lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page); cl_object_put(env, obj); lu_ref_fini(&page->cp_reference); OBD_FREE(page, pagesize); @@ -323,15 +323,16 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env, ENTRY; OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize, - CFS_ALLOC_IO); + __GFP_IO); if (page != NULL) { - int result; + int result = 0; cfs_atomic_set(&page->cp_ref, 1); if (type == CPT_CACHEABLE) /* for radix tree */ cfs_atomic_inc(&page->cp_ref); page->cp_obj = o; cl_object_get(o); - page->cp_obj_ref = lu_object_ref_add(&o->co_lu, "cl_page",page); + lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page", + page); page->cp_index = ind; cl_page_state_set_trust(page, CPS_CACHED); page->cp_type = type; @@ -382,17 +383,17 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, enum cl_page_type type, struct cl_page *parent) { - struct cl_page *page = NULL; - struct cl_page *ghost = NULL; - struct cl_object_header *hdr; - int err; + struct cl_page *page = NULL; + struct cl_page *ghost = NULL; + struct cl_object_header *hdr; + int err; - LASSERT(type == CPT_CACHEABLE || type == 
CPT_TRANSIENT); - cfs_might_sleep(); + LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT); + might_sleep(); - ENTRY; + ENTRY; - hdr = cl_object_header(o); + hdr = cl_object_header(o); CS_PAGE_INC(o, lookup); CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n", @@ -513,7 +514,7 @@ static inline int cl_page_invariant(const struct cl_page *pg) child = pg->cp_child; owner = pg->cp_owner; - return cl_page_in_use(pg) && + return cl_page_in_use_noref(pg) && ergo(parent != NULL, parent->cp_child == pg) && ergo(child != NULL, child->cp_parent == pg) && ergo(child != NULL, pg->cp_obj != child->cp_obj) && @@ -654,7 +655,7 @@ EXPORT_SYMBOL(cl_page_put); /** * Returns a VM page associated with a given cl_page. */ -cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) +struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) { const struct cl_page_slice *slice; @@ -677,7 +678,7 @@ EXPORT_SYMBOL(cl_page_vmpage); /** * Returns a cl_page associated with a VM page, and given cl_object. */ -struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj) +struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) { struct cl_page *top; struct cl_page *page; @@ -862,7 +863,7 @@ void cl_page_disown0(const struct lu_env *env, ENTRY; state = pg->cp_state; PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING); - PINVRNT(env, pg, cl_page_invariant(pg)); + PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING); cl_page_owner_clear(pg); if (state == CPS_OWNED) @@ -1044,7 +1045,8 @@ EXPORT_SYMBOL(cl_page_unassume); void cl_page_disown(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) { - PINVRNT(env, pg, cl_page_is_owned(pg, io)); + PINVRNT(env, pg, cl_page_is_owned(pg, io) || + pg->cp_state == CPS_FREEING); ENTRY; pg = cl_page_top(pg); @@ -1473,36 +1475,36 @@ static int page_prune_cb(const struct lu_env *env, struct cl_io *io, */ int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj) { - struct cl_thread_info *info; - struct cl_object *obj = cl_object_top(clobj); - struct cl_io *io; - int result; + struct cl_thread_info *info; + struct cl_object *obj = cl_object_top(clobj); + struct cl_io *io; + int result; - ENTRY; - info = cl_env_info(env); - io = &info->clt_io; + ENTRY; + info = cl_env_info(env); + io = &info->clt_io; - /* - * initialize the io. This is ugly since we never do IO in this - * function, we just make cl_page_list functions happy. -jay - */ - io->ci_obj = obj; + /* + * initialize the io. This is ugly since we never do IO in this + * function, we just make cl_page_list functions happy. -jay + */ + io->ci_obj = obj; io->ci_ignore_layout = 1; - result = cl_io_init(env, io, CIT_MISC, obj); - if (result != 0) { - cl_io_fini(env, io); - RETURN(io->ci_result); - } + result = cl_io_init(env, io, CIT_MISC, obj); + if (result != 0) { + cl_io_fini(env, io); + RETURN(io->ci_result); + } - do { - result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, - page_prune_cb, NULL); - if (result == CLP_GANG_RESCHED) - cfs_cond_resched(); - } while (result != CLP_GANG_OKAY); + do { + result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, + page_prune_cb, NULL); + if (result == CLP_GANG_RESCHED) + cond_resched(); + } while (result != CLP_GANG_OKAY); - cl_io_fini(env, io); - RETURN(result); + cl_io_fini(env, io); + RETURN(result); } EXPORT_SYMBOL(cl_pages_prune); @@ -1574,10 +1576,7 @@ EXPORT_SYMBOL(cl_page_cancel); */ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx) { - /* - * XXX for now. 
- */ - return (loff_t)idx << CFS_PAGE_SHIFT; + return (loff_t)idx << PAGE_CACHE_SHIFT; } EXPORT_SYMBOL(cl_offset); @@ -1586,16 +1585,13 @@ EXPORT_SYMBOL(cl_offset); */ pgoff_t cl_index(const struct cl_object *obj, loff_t offset) { - /* - * XXX for now. - */ - return offset >> CFS_PAGE_SHIFT; + return offset >> PAGE_CACHE_SHIFT; } EXPORT_SYMBOL(cl_index); int cl_page_size(const struct cl_object *obj) { - return 1 << CFS_PAGE_SHIFT; + return 1 << PAGE_CACHE_SHIFT; } EXPORT_SYMBOL(cl_page_size);
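
For reference, a minimal sketch (not part of the patch) of the callback contract that the "delicate locking dance" comment in cl_page_gang_lookup() describes: the lookup takes a reference on each page while hdr->coh_page_guard is held, drops the spinlock, and only then invokes the callback, so the callback may sleep in cl_page_own(); a non-zero return from cl_page_own() is taken to mean the page was truncated while the lock was dropped. The function name example_own_cb below is hypothetical; the in-tree example of this pattern is page_prune_cb().

	/*
	 * Illustrative sketch only: a cl_page_gang_cb_t-style callback.  It
	 * runs with a reference on the page but without the radix-tree lock
	 * held, so owning the page (which may block) is safe here.
	 */
	static int example_own_cb(const struct lu_env *env, struct cl_io *io,
				  struct cl_page *page, void *cbdata)
	{
		if (cl_page_own(env, io, page) != 0)
			return CLP_GANG_OKAY;	/* truncated meanwhile; skip */

		/* ... work on the owned page (page_prune_cb() discards it) ... */
		cl_page_disown(env, io, page);
		return CLP_GANG_OKAY;
	}

The cl_pages_prune() hunk above shows the caller side of the same contract: the lookup is retried in a loop, with cond_resched() called whenever CLP_GANG_RESCHED is returned, until it completes with CLP_GANG_OKAY.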
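
The final hunks drop the libcfs CFS_PAGE_SHIFT wrapper in favour of the kernel's PAGE_CACHE_SHIFT (an alias for PAGE_SHIFT on the kernels this release supports). A minimal sketch of the round trip that cl_offset() and cl_index() implement, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12); the variable names are illustrative:

	pgoff_t idx  = 3;
	loff_t  off  = (loff_t)idx << PAGE_CACHE_SHIFT;	/* 3 * 4096 = 12288 */
	pgoff_t back = off >> PAGE_CACHE_SHIFT;		/* 12288 >> 12 = 3  */

	/*
	 * back == idx, and every byte offset inside the page maps back to the
	 * same index.  The (loff_t) cast matters: pgoff_t is an unsigned long,
	 * so on 32-bit kernels a large index shifted left without the cast
	 * would overflow before being widened to the 64-bit file offset.
	 */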