X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_object.c;h=8f29926b5929d0c0e2e6f011c6149384c9d99685;hp=9fae7a98843b18d778f9082371bef2eaa3d037df;hb=93fe562c5dd3829939e3bc0533918f66b19776a4;hpb=98060d83459ba10409f295898f0ec917f938b4d3

diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c
index 9fae7a9..8f29926 100644
--- a/lustre/obdclass/cl_object.c
+++ b/lustre/obdclass/cl_object.c
@@ -61,7 +61,7 @@
 #include
 #include "cl_internal.h"
 
-static cfs_mem_cache_t *cl_env_kmem;
+static struct kmem_cache *cl_env_kmem;
 
 /** Lock class of cl_object_header::coh_page_guard */
 static struct lock_class_key cl_page_guard_class;
@@ -120,7 +120,7 @@ struct cl_object *cl_object_find(const struct lu_env *env,
 				 struct cl_device *cd, const struct lu_fid *fid,
 				 const struct cl_object_conf *c)
 {
-	cfs_might_sleep();
+	might_sleep();
 	return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
 }
 EXPORT_SYMBOL(cl_object_find);
@@ -218,11 +218,11 @@ EXPORT_SYMBOL(cl_object_attr_unlock);
 int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
 		       struct cl_attr *attr)
 {
-        struct lu_object_header *top;
-        int result;
+	struct lu_object_header *top;
+	int result;
 
-        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
-        ENTRY;
+	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+	ENTRY;
 
 	top = obj->co_lu.lo_header;
 	result = 0;
@@ -250,11 +250,11 @@ EXPORT_SYMBOL(cl_object_attr_get);
 int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
 		       const struct cl_attr *attr, unsigned v)
 {
-        struct lu_object_header *top;
-        int result;
+	struct lu_object_header *top;
+	int result;
 
-        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
-        ENTRY;
+	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+	ENTRY;
 
 	top = obj->co_lu.lo_header;
 	result = 0;
@@ -533,6 +533,12 @@ EXPORT_SYMBOL(cl_site_stats_print);
  * bz20044, bz22683.
  */
 
+static CFS_LIST_HEAD(cl_envs);
+static unsigned cl_envs_cached_nr  = 0;
+static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
+					   * for now. */
+static DEFINE_SPINLOCK(cl_envs_guard);
+
 struct cl_env {
         void          *ce_magic;
         struct lu_env  ce_lu;
@@ -547,18 +553,18 @@
          */
         cfs_hlist_node_t ce_node;
 #endif
-        /**
-         * Owner for the current cl_env.
-         *
-         * If LL_TASK_CL_ENV is defined, this point to the owning cfs_current(),
-         * only for debugging purpose ;
-         * Otherwise hash is used, and this is the key for cfs_hash.
-         * Now current thread pid is stored. Note using thread pointer would
-         * lead to unbalanced hash because of its specific allocation locality
-         * and could be varied for different platforms and OSes, even different
-         * OS versions.
-         */
-        void *ce_owner;
+	/**
+	 * Owner for the current cl_env.
+	 *
+	 * If LL_TASK_CL_ENV is defined, this points to the owning current
+	 * (the task), and is used only for debugging;
+	 * otherwise a hash is used, and this is the key for cfs_hash.
+	 * Currently the thread pid is stored; using the thread pointer
+	 * instead would lead to an unbalanced hash because of its specific
+	 * allocation locality, which can also vary across platforms, OSes,
+	 * and even OS versions.
+	 */
+	void *ce_owner;
 	/*
 	 * Linkage into global list of all client environments. Used for
 	 * environments cleanup.
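
The next two hunks update both flavors of per-thread environment storage,
replacing the portability wrapper cfs_current() with the kernel's current.
As background for the LL_TASK_CL_ENV flavor, here is a minimal user-space
sketch (illustration only; all names are hypothetical and it is not part of
the patch) of the attach/detach discipline that cl_env_attach() and
cl_env_do_detach() implement, with a C11 thread-local pointer standing in
for the task-struct field and ce_prev providing nesting:

#include <assert.h>
#include <stddef.h>

struct env_sketch {
	void *ce_owner;                    /* owning "task", for debugging */
	struct env_sketch *ce_prev;        /* outer env, enables nesting   */
};

/* Thread-local slot standing in for current->LL_TASK_CL_ENV. */
static _Thread_local struct env_sketch *task_cl_env;
static _Thread_local int task_self;        /* stand-in for "current"      */

static void env_attach(struct env_sketch *cle)
{
	assert(cle->ce_owner == NULL);
	cle->ce_owner = &task_self;        /* ~ cle->ce_owner = current   */
	cle->ce_prev = task_cl_env;        /* remember the outer env      */
	task_cl_env = cle;
}

static void env_detach(struct env_sketch *cle)
{
	assert(cle->ce_owner == &task_self);
	assert(task_cl_env == cle);
	task_cl_env = cle->ce_prev;        /* pop back to the outer env   */
	cle->ce_owner = NULL;
}

int main(void)
{
	struct env_sketch outer = { NULL, NULL }, inner = { NULL, NULL };

	env_attach(&outer);
	env_attach(&inner);                /* nesting: inner shadows outer */
	env_detach(&inner);
	assert(task_cl_env == &outer);
	env_detach(&outer);
	return 0;
}

The hash flavor below achieves the same per-thread lookup without a task
field, keying cfs_hash by pid instead.
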
@@ -649,35 +655,35 @@ static cfs_hash_ops_t cl_env_hops = {
 
 static inline struct cl_env *cl_env_fetch(void)
 {
-        struct cl_env *cle;
+	struct cl_env *cle;
 
-        cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
-        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
-        return cle;
+	cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+	return cle;
 }
 
 static inline void cl_env_attach(struct cl_env *cle)
 {
-        if (cle) {
-                int rc;
-
-                LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = (void *) (long) cfs_current()->pid;
-                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
-                                         &cle->ce_node);
-                LASSERT(rc == 0);
-        }
+	if (cle) {
+		int rc;
+
+		LASSERT(cle->ce_owner == NULL);
+		cle->ce_owner = (void *) (long) current->pid;
+		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
+					 &cle->ce_node);
+		LASSERT(rc == 0);
+	}
 }
 
 static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        void *cookie;
+	void *cookie;
 
-        LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
-        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
-                              &cle->ce_node);
-        LASSERT(cookie == cle);
-        cle->ce_owner = NULL;
+	LASSERT(cle->ce_owner == (void *) (long) current->pid);
+	cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+			      &cle->ce_node);
+	LASSERT(cookie == cle);
+	cle->ce_owner = NULL;
 }
 
 static int cl_env_store_init(void) {
@@ -702,30 +708,30 @@ static void cl_env_store_fini(void) {
 
 static inline struct cl_env *cl_env_fetch(void)
 {
-        struct cl_env *cle;
+	struct cl_env *cle;
 
-        cle = cfs_current()->LL_TASK_CL_ENV;
-        if (cle && cle->ce_magic != &cl_env_init0)
-                cle = NULL;
-        return cle;
+	cle = current->LL_TASK_CL_ENV;
+	if (cle && cle->ce_magic != &cl_env_init0)
+		cle = NULL;
+	return cle;
 }
 
 static inline void cl_env_attach(struct cl_env *cle)
 {
-        if (cle) {
-                LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = cfs_current();
-                cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
-                cfs_current()->LL_TASK_CL_ENV = cle;
-        }
+	if (cle) {
+		LASSERT(cle->ce_owner == NULL);
+		cle->ce_owner = current;
+		cle->ce_prev = current->LL_TASK_CL_ENV;
+		current->LL_TASK_CL_ENV = cle;
+	}
}
 
 static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        LASSERT(cle->ce_owner == cfs_current());
-        LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
-        cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
-        cle->ce_owner = NULL;
+	LASSERT(cle->ce_owner == current);
+	LASSERT(current->LL_TASK_CL_ENV == cle);
+	current->LL_TASK_CL_ENV = cle->ce_prev;
+	cle->ce_owner = NULL;
 }
 
 static int cl_env_store_init(void) { return 0; }
@@ -749,7 +755,7 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
 	struct lu_env *env;
 	struct cl_env *cle;
 
-	OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO);
+	OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO);
 	if (cle != NULL) {
 		int rc;
 
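
The hunk below adds cl_env_obtain(), the allocation side of the new
environment cache: under cl_envs_guard it pops a cached cl_env off the
cl_envs list and refreshes it with lu_env_refill(), falling back to
cl_env_new() when the cache is empty. The following stand-alone sketch
(user-space C; a pthread mutex replaces the spinlock, calloc/free replace
the slab, and all names are hypothetical) shows the same bounded-freelist
pattern, including the put and purge sides that later hunks add. One
simplification: the real cl_env_cache_purge() drops the lock around
cl_env_fini(), since finalization may sleep; the sketch frees under the
lock for brevity.

#include <pthread.h>
#include <stdlib.h>

struct cenv {
	struct cenv *next;
	/* ... per-environment state would live here ... */
};

static pthread_mutex_t cache_guard = PTHREAD_MUTEX_INITIALIZER;
static struct cenv *cache_head;            /* ~ cl_envs            */
static unsigned cache_nr;                  /* ~ cl_envs_cached_nr  */
static const unsigned cache_max = 128;     /* ~ cl_envs_cached_max */

static struct cenv *env_obtain(void)
{
	struct cenv *env = NULL;

	pthread_mutex_lock(&cache_guard);
	if (cache_head != NULL) {          /* cache hit: reuse an env   */
		env = cache_head;
		cache_head = env->next;
		cache_nr--;
	}
	pthread_mutex_unlock(&cache_guard);
	if (env == NULL)                   /* cache miss: allocate anew */
		env = calloc(1, sizeof(*env));
	return env;
}

static void env_put(struct cenv *env)
{
	/* Racy cache_nr check, as in cl_env_put(): the bound is only a
	 * heuristic, so taking the lock just to test it is not worth it. */
	if (cache_nr < cache_max) {
		pthread_mutex_lock(&cache_guard);
		env->next = cache_head;
		cache_head = env;
		cache_nr++;
		pthread_mutex_unlock(&cache_guard);
	} else {
		free(env);
	}
}

static unsigned env_cache_purge(unsigned nr)
{
	pthread_mutex_lock(&cache_guard);
	while (cache_head != NULL && nr > 0) {
		struct cenv *env = cache_head;

		cache_head = env->next;
		cache_nr--;
		free(env);                 /* ~ cl_env_fini()           */
		nr--;
	}
	pthread_mutex_unlock(&cache_guard);
	return nr;                         /* remainder of the request  */
}

int main(void)
{
	struct cenv *e = env_obtain();

	env_put(e);                        /* cached for reuse */
	env_cache_purge(~0u);              /* drain the cache  */
	return 0;
}
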
@@ -787,6 +793,40 @@ static void cl_env_fini(struct cl_env *cle)
 	OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
 }
 
+static struct lu_env *cl_env_obtain(void *debug)
+{
+	struct cl_env *cle;
+	struct lu_env *env;
+
+	ENTRY;
+	spin_lock(&cl_envs_guard);
+	LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+	if (cl_envs_cached_nr > 0) {
+		int rc;
+
+		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+		cfs_list_del_init(&cle->ce_linkage);
+		cl_envs_cached_nr--;
+		spin_unlock(&cl_envs_guard);
+
+		env = &cle->ce_lu;
+		rc = lu_env_refill(env);
+		if (rc == 0) {
+			cl_env_init0(cle, debug);
+			lu_context_enter(&env->le_ctx);
+			lu_context_enter(&cle->ce_ses);
+		} else {
+			cl_env_fini(cle);
+			env = ERR_PTR(rc);
+		}
+	} else {
+		spin_unlock(&cl_envs_guard);
+		env = cl_env_new(lu_context_tags_default,
+				 lu_session_tags_default, debug);
+	}
+	RETURN(env);
+}
+
 static inline struct cl_env *cl_env_container(struct lu_env *env)
 {
 	return container_of(env, struct cl_env, ce_lu);
@@ -818,6 +858,8 @@ EXPORT_SYMBOL(cl_env_peek);
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned, otherwise, new environment is allocated.
 *
+ * Allocations are amortized through the global cache of environments.
+ *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and pointer to the same integer is passed as \a refcheck. This is
@@ -831,10 +873,7 @@ struct lu_env *cl_env_get(int *refcheck)
 
 	env = cl_env_peek(refcheck);
 	if (env == NULL) {
-		env = cl_env_new(lu_context_tags_default,
-				 lu_session_tags_default,
-				 __builtin_return_address(0));
-
+		env = cl_env_obtain(__builtin_return_address(0));
 		if (!IS_ERR(env)) {
 			struct cl_env *cle;
@@ -878,6 +917,33 @@ static void cl_env_exit(struct cl_env *cle)
 }
 
 /**
+ * Finalizes and frees a given number of cached environments. This is done to
+ * (1) free some memory (not currently hooked into VM), or (2) release
+ * references to modules.
+ */
+unsigned cl_env_cache_purge(unsigned nr)
+{
+	struct cl_env *cle;
+
+	ENTRY;
+	spin_lock(&cl_envs_guard);
+	for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
+		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+		cfs_list_del_init(&cle->ce_linkage);
+		LASSERT(cl_envs_cached_nr > 0);
+		cl_envs_cached_nr--;
+		spin_unlock(&cl_envs_guard);
+
+		cl_env_fini(cle);
+		spin_lock(&cl_envs_guard);
+	}
+	LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+	spin_unlock(&cl_envs_guard);
+	RETURN(nr);
+}
+EXPORT_SYMBOL(cl_env_cache_purge);
+
+/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When counter drops to 0, nothing in
@@ -899,7 +965,21 @@ void cl_env_put(struct lu_env *env, int *refcheck)
 		cl_env_detach(cle);
 		cle->ce_debug = NULL;
 		cl_env_exit(cle);
-		cl_env_fini(cle);
+		/*
+		 * Don't bother to take a lock here: the cl_envs_cached_nr
+		 * bound is only a heuristic, so a racy read is harmless.
+		 * Return the environment to the cache only when it was
+		 * allocated with the standard tags.
+		 */
+		if (cl_envs_cached_nr < cl_envs_cached_max &&
+		    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
+		    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
+			spin_lock(&cl_envs_guard);
+			cfs_list_add(&cle->ce_linkage, &cl_envs);
+			cl_envs_cached_nr++;
+			spin_unlock(&cl_envs_guard);
+		} else
+			cl_env_fini(cle);
 	}
 }
 EXPORT_SYMBOL(cl_env_put);
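
Taken together, cl_env_get() now draws environments from the cache via
cl_env_obtain(), and cl_env_put() refills the cache (standard-tag
environments only, bounded by cl_envs_cached_max). The calling convention
documented at cl_env_get() is unchanged; a typical caller looks like the
sketch below (my_cl_operation is a hypothetical function for illustration;
this fragment only compiles inside the Lustre tree):

/* Hypothetical caller, illustrating the refcheck convention described in
 * the cl_env_get() doc comment: the same integer is passed to cl_env_get()
 * and cl_env_put() in one lexical scope so environment leaks are detected. */
static int my_cl_operation(struct cl_object *obj, struct cl_attr *attr)
{
	struct lu_env *env;
	int refcheck;
	int result;

	env = cl_env_get(&refcheck);       /* cached or freshly allocated */
	if (IS_ERR(env))
		return PTR_ERR(env);

	cl_object_attr_lock(obj);          /* guard asserted by attr_get() */
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);

	cl_env_put(env, &refcheck);        /* may return env to the cache */
	return result;
}
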