X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_page.c;h=03ad075187257aa5a55e77c6ac2f3686c36236ed;hp=625d8cb288565b4c0942eaea47bfbd2c0cf71e1f;hb=f625f670afbe954030ff81f0f8522137d6cdd335;hpb=3f3a24dc5d7d421e1514dc49cc7c2eb5cb762b26

diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 625d8cb..03ad075 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -218,7 +218,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
 	}
 	RETURN(page);
 }
-EXPORT_SYMBOL(cl_page_alloc);
 
 /**
  * Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -452,30 +451,6 @@ EXPORT_SYMBOL(cl_page_at);
 	__result;							\
 })
 
-#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)		\
-({									\
-	const struct lu_env *__env = (_env);				\
-	struct cl_page *__page = (_page);				\
-	const struct cl_page_slice *__scan;				\
-	int __result;							\
-	ptrdiff_t __op = (_op);						\
-	int (*__method)_proto;						\
-									\
-	__result = 0;							\
-	list_for_each_entry_reverse(__scan, &__page->cp_layers,	\
-				    cpl_linkage) {			\
-		__method = *(void **)((char *)__scan->cpl_ops + __op);	\
-		if (__method != NULL) {					\
-			__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
-			if (__result != 0)				\
-				break;					\
-		}							\
-	}								\
-	if (__result > 0)						\
-		__result = 0;						\
-	__result;							\
-})
-
 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)			\
 do {									\
 	const struct lu_env *__env = (_env);				\
@@ -769,7 +744,7 @@ EXPORT_SYMBOL(cl_page_discard);
 
 /**
  * Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
+ * pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
  * path. Doesn't check page invariant.
  */
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
@@ -1027,30 +1002,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
 EXPORT_SYMBOL(cl_page_flush);
 
 /**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
-			  struct cl_page *page, pgoff_t *max_index)
-{
-	int rc;
-
-	PINVRNT(env, page, cl_page_invariant(page));
-
-	ENTRY;
-	rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
-				    (const struct lu_env *,
-				     const struct cl_page_slice *,
-				     struct cl_io *, pgoff_t *),
-				    io, max_index);
-	RETURN(rc);
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
  * Tells transfer engine that only part of a page is to be transmitted.
  *
  * \see cl_page_operations::cpo_clip()
@@ -1106,7 +1057,6 @@ int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
 			      (const struct lu_env *,
 			       const struct cl_page_slice *));
 }
-EXPORT_SYMBOL(cl_page_cancel);
 
 /**
  * Converts a byte offset within object \a obj into a page index.
@@ -1126,9 +1076,9 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 }
 EXPORT_SYMBOL(cl_index);
 
-int cl_page_size(const struct cl_object *obj)
+size_t cl_page_size(const struct cl_object *obj)
 {
-	return 1 << PAGE_CACHE_SHIFT;
+	return 1UL << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
 
@@ -1154,3 +1104,52 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
 	EXIT;
 }
 EXPORT_SYMBOL(cl_page_slice_add);
+
+/**
+ * Allocate and initialize cl_cache, called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
+{
+	struct cl_client_cache *cache = NULL;
+
+	ENTRY;
+	OBD_ALLOC(cache, sizeof(*cache));
+	if (cache == NULL)
+		RETURN(NULL);
+
+	/* Initialize cache data */
+	atomic_set(&cache->ccc_users, 1);
+	cache->ccc_lru_max = lru_page_max;
+	atomic_long_set(&cache->ccc_lru_left, lru_page_max);
+	spin_lock_init(&cache->ccc_lru_lock);
+	INIT_LIST_HEAD(&cache->ccc_lru);
+
+	/* turn unstable check off by default as it impacts performance */
+	cache->ccc_unstable_check = 0;
+	atomic_long_set(&cache->ccc_unstable_nr, 0);
+	init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+	RETURN(cache);
+}
+EXPORT_SYMBOL(cl_cache_init);
+
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
+{
+	atomic_inc(&cache->ccc_users);
+}
+EXPORT_SYMBOL(cl_cache_incref);
+
+/**
+ * Decrease cl_cache refcount and free the cache if refcount=0.
+ * Since llite, lov and osc all hold cl_cache refcount,
+ * the free will not cause race. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
+{
+	if (atomic_dec_and_test(&cache->ccc_users))
+		OBD_FREE(cache, sizeof(*cache));
+}
+EXPORT_SYMBOL(cl_cache_decref);
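
Note on the new cl_client_cache API: the final hunk introduces a client cache shared by the llite, lov and osc layers. The creator holds the initial reference (ccc_users starts at 1), every other layer takes its own via cl_cache_incref(), and the last cl_cache_decref() frees the cache. Below is a minimal user-space sketch of that refcount lifecycle, assuming C11 atomics in place of the kernel's atomic_t and calloc()/free() in place of OBD_ALLOC()/OBD_FREE(); the trimmed struct and the main() driver are illustrative stand-ins, not the real Lustre definitions.

#include <stdatomic.h>
#include <stdlib.h>

/* Trimmed stand-in for the kernel structure added by the patch. */
struct cl_client_cache {
	atomic_int	ccc_users;	/* # of users (llite, lov, osc) */
	unsigned long	ccc_lru_max;	/* max LRU pages to cache */
};

/* Creator holds the first reference, mirroring atomic_set(..., 1). */
static struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
{
	struct cl_client_cache *cache = calloc(1, sizeof(*cache));

	if (cache == NULL)
		return NULL;
	atomic_init(&cache->ccc_users, 1);
	cache->ccc_lru_max = lru_page_max;
	return cache;
}

static void cl_cache_incref(struct cl_client_cache *cache)
{
	atomic_fetch_add(&cache->ccc_users, 1);
}

/* Last reference out frees the cache, like atomic_dec_and_test(). */
static void cl_cache_decref(struct cl_client_cache *cache)
{
	if (atomic_fetch_sub(&cache->ccc_users, 1) == 1)
		free(cache);
}

int main(void)
{
	struct cl_client_cache *cache = cl_cache_init(1024);

	if (cache == NULL)
		return 1;
	cl_cache_incref(cache);	/* second user, e.g. lov */
	cl_cache_incref(cache);	/* third user, e.g. osc */
	cl_cache_decref(cache);	/* osc releases */
	cl_cache_decref(cache);	/* lov releases */
	cl_cache_decref(cache);	/* creator releases: cache is freed */
	return 0;
}

Because each module holds its own reference, teardown order between the layers does not matter: whichever caller reaches cl_cache_decref() last performs the free, which is the race the LU-6173 comment says cannot occur.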