X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_object.c;h=1eb8e76d57349a858906da456051e9b18f4b2e4b;hb=ac7193045474854a1a4136c7d897d8188913b38c;hp=f8ef9f7bf490736f171824d9bf2c72edf78956eb;hpb=589bc6c478b91d2d0e4cdd2aefd83dd45be2ef51;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c
index f8ef9f7..1eb8e76 100644
--- a/lustre/obdclass/cl_object.c
+++ b/lustre/obdclass/cl_object.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,8 +24,10 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -50,9 +50,6 @@
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 
 #include <libcfs/libcfs.h>
 /* class_put_type() */
@@ -61,21 +58,20 @@
 #include <lustre_fid.h>
 #include <libcfs/list.h>
 #include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
-/* lu_time_global_{init,fini}() */
-#include <lu_time.h>
-
 #include <cl_object.h>
 #include "cl_internal.h"
 
 static cfs_mem_cache_t *cl_env_kmem;
 
 /** Lock class of cl_object_header::coh_page_guard */
-static cfs_lock_class_key_t cl_page_guard_class;
+static struct lock_class_key cl_page_guard_class;
 /** Lock class of cl_object_header::coh_lock_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
+static struct lock_class_key cl_lock_guard_class;
 /** Lock class of cl_object_header::coh_attr_guard */
-static cfs_lock_class_key_t cl_attr_guard_class;
+static struct lock_class_key cl_attr_guard_class;
+extern __u32 lu_context_tags_default;
+extern __u32 lu_session_tags_default;
 
 /**
  * Initialize cl_object_header.
  */
@@ -86,16 +82,17 @@ int cl_object_header_init(struct cl_object_header *h)
         ENTRY;
         result = lu_object_header_init(&h->coh_lu);
         if (result == 0) {
-                cfs_spin_lock_init(&h->coh_page_guard);
-                cfs_spin_lock_init(&h->coh_lock_guard);
-                cfs_spin_lock_init(&h->coh_attr_guard);
-                cfs_lockdep_set_class(&h->coh_attr_guard, &cl_page_guard_class);
-                cfs_lockdep_set_class(&h->coh_attr_guard, &cl_lock_guard_class);
-                cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
+                spin_lock_init(&h->coh_page_guard);
+                spin_lock_init(&h->coh_lock_guard);
+                spin_lock_init(&h->coh_attr_guard);
+                lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
+                lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
+                lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
                 h->coh_pages = 0;
                 /* XXX hard coded GFP_* mask. */
                 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
                 CFS_INIT_LIST_HEAD(&h->coh_locks);
+                h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
         }
         RETURN(result);
 }
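The hunk above also fixes a subtle bug in the old code, which installed all three lock classes on coh_attr_guard and left the other two guards on the default class. A minimal sketch of the kernel pattern involved follows (demo_header and the key names are invented; spin_lock_init() and lockdep_set_class() are the real kernel APIs): spinlocks initialized from the same spin_lock_init() call site share a single lockdep class, so guards that may nest need distinct, statically allocated keys to avoid false deadlock warnings.

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    /* One static key per logical lock role; lockdep groups locks by key. */
    static struct lock_class_key demo_page_guard_key;
    static struct lock_class_key demo_lock_guard_key;

    struct demo_header {
            spinlock_t page_guard;
            spinlock_t lock_guard;
    };

    static void demo_header_init(struct demo_header *h)
    {
            spin_lock_init(&h->page_guard);
            spin_lock_init(&h->lock_guard);
            /* Distinct keys: nesting page_guard under lock_guard (or vice
             * versa) is tracked per class, not reported as lock recursion. */
            lockdep_set_class(&h->page_guard, &demo_page_guard_key);
            lockdep_set_class(&h->lock_guard, &demo_lock_guard_key);
    }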
@@ -184,9 +181,9 @@ EXPORT_SYMBOL(cl_object_top);
  *
  * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
  */
-static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
+static spinlock_t *cl_object_attr_guard(struct cl_object *o)
 {
-        return &cl_object_header(cl_object_top(o))->coh_attr_guard;
+        return &cl_object_header(cl_object_top(o))->coh_attr_guard;
 }
 
 /**
@@ -198,7 +195,7 @@ static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
  */
 void cl_object_attr_lock(struct cl_object *o)
 {
-        cfs_spin_lock(cl_object_attr_guard(o));
+        spin_lock(cl_object_attr_guard(o));
 }
 EXPORT_SYMBOL(cl_object_attr_lock);
 
@@ -207,7 +204,7 @@ EXPORT_SYMBOL(cl_object_attr_lock);
  */
 void cl_object_attr_unlock(struct cl_object *o)
 {
-        cfs_spin_unlock(cl_object_attr_guard(o));
+        spin_unlock(cl_object_attr_guard(o));
 }
 EXPORT_SYMBOL(cl_object_attr_unlock);
 
@@ -348,7 +345,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
         LASSERT(hdr->coh_tree.rnode == NULL);
         LASSERT(hdr->coh_pages == 0);
 
-        cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+        set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
         /*
          * Destroy all locks. Object destruction (including cl_inode_fini())
          * cannot cancel the locks, because in the case of a local client,
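The two wrappers just converted are the sanctioned way to serialize attribute access, and the guard they take lives in the top object's header, so every layer of a stacked cl_object synchronizes on the same lock. A hedged usage sketch of the locking protocol (demo_set_kms is invented; cl_object_attr_lock()/cl_object_attr_unlock() are the wrappers above, and cl_object_attr_set()/CAT_KMS follow the cl_object attribute API this file exports):

    /* Sketch: update the known-minimum-size attribute under the guard. */
    static int demo_set_kms(const struct lu_env *env, struct cl_object *obj,
                            __u64 kms)
    {
            struct cl_attr attr = { 0 };
            int result;

            cl_object_attr_lock(obj);       /* top object's coh_attr_guard */
            attr.cat_kms = kms;
            result = cl_object_attr_set(env, obj, &attr, CAT_KMS);
            cl_object_attr_unlock(obj);
            return result;
    }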
@@ -377,47 +374,50 @@ EXPORT_SYMBOL(cl_object_prune);
  */
 int cl_object_has_locks(struct cl_object *obj)
 {
-        struct cl_object_header *head = cl_object_header(obj);
-        int has;
+        struct cl_object_header *head = cl_object_header(obj);
+        int has;
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        has = cfs_list_empty(&head->coh_locks);
-        cfs_spin_unlock(&head->coh_lock_guard);
+        spin_lock(&head->coh_lock_guard);
+        has = cfs_list_empty(&head->coh_locks);
+        spin_unlock(&head->coh_lock_guard);
 
-        return (has == 0);
+        return (has == 0);
 }
 EXPORT_SYMBOL(cl_object_has_locks);
 
 void cache_stats_init(struct cache_stats *cs, const char *name)
 {
+        int i;
+
         cs->cs_name = name;
-        cfs_atomic_set(&cs->cs_lookup, 0);
-        cfs_atomic_set(&cs->cs_hit, 0);
-        cfs_atomic_set(&cs->cs_total, 0);
-        cfs_atomic_set(&cs->cs_busy, 0);
+        for (i = 0; i < CS_NR; i++)
+                cfs_atomic_set(&cs->cs_stats[i], 0);
 }
 
 int cache_stats_print(const struct cache_stats *cs, char *page, int count, int h)
 {
-        int nob = 0;
-/*
-        lookup    hit  total cached create
-  env: ...... ...... ...... ...... ......
-*/
-        if (h)
-                nob += snprintf(page, count,
-                                "    lookup    hit  total   busy create\n");
-
-        nob += snprintf(page + nob, count - nob,
-                        "%5.5s: %6u %6u %6u %6u %6u",
-                        cs->cs_name,
-                        cfs_atomic_read(&cs->cs_lookup),
-                        cfs_atomic_read(&cs->cs_hit),
-                        cfs_atomic_read(&cs->cs_total),
-                        cfs_atomic_read(&cs->cs_busy),
-                        cfs_atomic_read(&cs->cs_created));
-        return nob;
+        int nob = 0;
+        int i;
+        /*
+         *   lookup    hit  total cached create
+         * env: ...... ...... ...... ...... ......
+         */
+        if (h) {
+                const char *names[CS_NR] = CS_NAMES;
+
+                nob += snprintf(page + nob, count - nob, "%6s", " ");
+                for (i = 0; i < CS_NR; i++)
+                        nob += snprintf(page + nob, count - nob,
+                                        "%8s", names[i]);
+                nob += snprintf(page + nob, count - nob, "\n");
+        }
+
+        nob += snprintf(page + nob, count - nob, "%5.5s:", cs->cs_name);
+        for (i = 0; i < CS_NR; i++)
+                nob += snprintf(page + nob, count - nob, "%8u",
+                                cfs_atomic_read(&cs->cs_stats[i]));
+        return nob;
 }
 
 /**
@@ -455,11 +455,7 @@ EXPORT_SYMBOL(cl_site_fini);
 
 static struct cache_stats cl_env_stats = {
         .cs_name    = "envs",
-        .cs_created = CFS_ATOMIC_INIT(0),
-        .cs_lookup  = CFS_ATOMIC_INIT(0),
-        .cs_hit     = CFS_ATOMIC_INIT(0),
-        .cs_total   = CFS_ATOMIC_INIT(0),
-        .cs_busy    = CFS_ATOMIC_INIT(0)
+        .cs_stats = { CFS_ATOMIC_INIT(0), }
 };
 
 /**
@@ -519,26 +515,51 @@ EXPORT_SYMBOL(cl_site_stats_print);
  *
  */
 
-static CFS_LIST_HEAD(cl_envs);
-static unsigned cl_envs_cached_nr  = 0;
-static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
-                                           * for now. */
-static cfs_spinlock_t cl_envs_guard = CFS_SPIN_LOCK_UNLOCKED;
+/**
+ * The most efficient way is to store the cl_env pointer in task-specific
+ * structures. On Linux, it won't be easy to use task_struct->journal_info,
+ * because Lustre code may call into other file systems, which have their own
+ * assumptions about journal_info. Currently the following fields in
+ * task_struct have been identified as usable for this purpose:
+ *  - cl_env: for liblustre.
+ *  - tux_info: only on RedHat kernels.
+ *  - ...
+ * \note As long as we use task_struct to store cl_env, we assume that once
+ * called into Lustre, we'll never call into the other part of the kernel
+ * which will use those fields in task_struct without explicitly exiting
+ * Lustre.
+ *
+ * If no space in task_struct is available, a hash table will be used.
+ * See bz20044 and bz22683.
+ */
 
 struct cl_env {
         void             *ce_magic;
         struct lu_env     ce_lu;
         struct lu_context ce_ses;
+
+#ifdef LL_TASK_CL_ENV
+        void             *ce_prev;
+#else
         /**
          * This allows cl_env to be entered into cl_env_hash which implements
          * the current thread -> client environment lookup.
          */
         cfs_hlist_node_t  ce_node;
+#endif
         /**
-         * Owner for the current cl_env, the key for cfs_hash.
-         * Now current thread pointer is stored.
+         * Owner for the current cl_env.
+         *
+         * If LL_TASK_CL_ENV is defined, this points to the owning
+         * cfs_current(), for debugging purposes only.
+         * Otherwise a hash is used, and this is the key for cfs_hash.
+         * Currently the thread pid is stored. Note that using the thread
+         * pointer would lead to an unbalanced hash because of its specific
+         * allocation locality, which can also vary across platforms, OSes,
+         * and even OS versions.
          */
         void             *ce_owner;
+
         /*
          * Linkage into global list of all client environments. Used for
          * garbage collection.
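The new comment block argues that a task pointer makes a poor hash key while a pid spreads well. A standalone toy program (not Lustre code; a plain power-of-two mask stands in for bucket selection, whereas the real cl_env_hops_hash() additionally mixes bits with cfs_hash_u64_hash(), so this only illustrates the locality argument): slab-allocated task pointers share their low bits because of alignment, so a mask-based bucket choice collapses them, while consecutive pids spread across buckets.

    #include <stdio.h>

    #define NBUCKETS 16     /* power of two, as hash table sizes usually are */

    static unsigned bucket(unsigned long key)
    {
            return (unsigned)(key & (NBUCKETS - 1));
    }

    int main(void)
    {
            unsigned long base = 0xffff880012340000UL;
            int i;

            /* 256-byte-aligned "task pointers": low bits are always zero,
             * so every key lands in the same bucket. */
            for (i = 0; i < 4; i++)
                    printf("ptr %#lx -> bucket %u\n",
                           base + i * 256UL, bucket(base + i * 256UL));
            /* consecutive pids cycle through the whole table */
            for (i = 1000; i < 1004; i++)
                    printf("pid %d   -> bucket %u\n", i, bucket(i));
            return 0;
    }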
@@ -555,24 +576,39 @@ struct cl_env {
         void             *ce_debug;
 };
 
-#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
 
-#define CL_ENV_DEC(counter)                                             \
-        do {                                                            \
-                LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0);   \
-                cfs_atomic_dec(&cl_env_stats.counter);                  \
-        } while (0)
+#define CL_ENV_DEC(counter) do { \
+        LASSERT(cfs_atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
+        cfs_atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
+} while (0)
+#else
+#define CL_ENV_INC(counter)
+#define CL_ENV_DEC(counter)
+#endif
 
-/*****************************************************************************
- * Routins to use cfs_hash functionality to bind the current thread
- * to cl_env
+static void cl_env_init0(struct cl_env *cle, void *debug)
+{
+        LASSERT(cle->ce_ref == 0);
+        LASSERT(cle->ce_magic == &cl_env_init0);
+        LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
+
+        cle->ce_ref = 1;
+        cle->ce_debug = debug;
+        CL_ENV_INC(busy);
+}
+
+
+#ifndef LL_TASK_CL_ENV
+/*
+ * The implementation that uses a hash table to connect cl_env and thread.
  */
-/** lustre hash to manage the cl_env for current thread */
 static cfs_hash_t *cl_env_hash;
-static void cl_env_init0(struct cl_env *cle, void *debug);
 
-static unsigned cl_env_hops_hash(cfs_hash_t *lh, void *key, unsigned mask)
+static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+                                 const void *key, unsigned mask)
 {
 #if BITS_PER_LONG == 64
         return cfs_hash_u64_hash((__u64)key, mask);
@@ -588,7 +624,7 @@ static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
         return (void *)cle;
 }
 
-static int cl_env_hops_compare(void *key, cfs_hlist_node_t *hn)
+static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
 {
         struct cl_env *cle = cl_env_hops_obj(hn);
 
@@ -596,18 +632,26 @@ static int cl_env_hops_compare(void *key, cfs_hlist_node_t *hn)
         return (key == cle->ce_owner);
 }
 
+static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
+{
+        struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
+        LASSERT(cle->ce_magic == &cl_env_init0);
+}
+
 static cfs_hash_ops_t cl_env_hops = {
-        .hs_hash    = cl_env_hops_hash,
-        .hs_compare = cl_env_hops_compare,
-        .hs_key     = cl_env_hops_obj,
-        .hs_get     = cl_env_hops_obj,
-        .hs_put     = cl_env_hops_obj,
+        .hs_hash       = cl_env_hops_hash,
+        .hs_key        = cl_env_hops_obj,
+        .hs_keycmp     = cl_env_hops_keycmp,
+        .hs_object     = cl_env_hops_obj,
+        .hs_get        = cl_env_hops_noop,
+        .hs_put_locked = cl_env_hops_noop,
 };
 
 static inline struct cl_env *cl_env_fetch(void)
 {
         struct cl_env *cle;
-        cle = cfs_hash_lookup(cl_env_hash, cfs_current());
+
+        cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
         LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
         return cle;
 }
@@ -616,42 +660,91 @@
 static inline void cl_env_attach(struct cl_env *cle)
 {
         if (cle) {
                 int rc;
+
                 LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = cfs_current();
+                cle->ce_owner = (void *) (long) cfs_current()->pid;
                 rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
-                                         &cle->ce_node);
+                                         &cle->ce_node);
                 LASSERT(rc == 0);
         }
 }
 
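The reworked cfs_hash_ops_t table above (hs_keycmp/hs_object replacing hs_compare, plus no-op get/put callbacks since cl_env lifetime is managed by its own refcount) is an instance of the callback-table pattern: the hash library stays generic and calls back into the client for hashing and key comparison. A self-contained sketch of that pattern with invented names — this is not the cfs_hash API itself:

    #include <stddef.h>

    struct hnode { struct hnode *next; };

    struct hash_ops {
            unsigned (*hs_hash)(const void *key, unsigned mask);
            int (*hs_keycmp)(const void *key, struct hnode *hn);
    };

    struct htable {
            struct hnode **buckets;         /* mask + 1 chains */
            unsigned mask;
            const struct hash_ops *ops;
    };

    /* Generic lookup: hash the key to a chain, then let the client's
     * keycmp callback decide which node, if any, matches. */
    static struct hnode *htable_lookup(struct htable *t, const void *key)
    {
            struct hnode *hn = t->buckets[t->ops->hs_hash(key, t->mask)];

            for (; hn != NULL; hn = hn->next)
                    if (t->ops->hs_keycmp(key, hn))     /* 1 == match */
                            return hn;
            return NULL;
    }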
-static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        if (cle == NULL)
-                cle = cl_env_fetch();
-        if (cle && cle->ce_owner) {
-                void *cookie;
-                LASSERT(cle->ce_owner == cfs_current());
-                cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
-                                      &cle->ce_node);
-                cle->ce_owner = NULL;
-                LASSERT(cookie == cle);
-        }
+        void *cookie;
+
+        LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
+        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+                              &cle->ce_node);
+        LASSERT(cookie == cle);
+        cle->ce_owner = NULL;
+}
+
+static int cl_env_store_init(void) {
+        cl_env_hash = cfs_hash_create("cl_env",
+                                      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
+                                      HASH_CL_ENV_BKT_BITS, 0,
+                                      CFS_HASH_MIN_THETA,
+                                      CFS_HASH_MAX_THETA,
+                                      &cl_env_hops,
+                                      CFS_HASH_RW_BKTLOCK);
+        return cl_env_hash != NULL ? 0 : -ENOMEM;
+}
+
+static void cl_env_store_fini(void) {
+        cfs_hash_putref(cl_env_hash);
+}
+
+#else /* LL_TASK_CL_ENV */
+/*
+ * The implementation that stores cl_env directly in the thread structure.
+ */
+
+static inline struct cl_env *cl_env_fetch(void)
+{
+        struct cl_env *cle;
+
+        cle = cfs_current()->LL_TASK_CL_ENV;
+        if (cle && cle->ce_magic != &cl_env_init0)
+                cle = NULL;
         return cle;
 }
-/* ----------------------- hash routines end ---------------------------- */
 
-static void cl_env_init0(struct cl_env *cle, void *debug)
+static inline void cl_env_attach(struct cl_env *cle)
 {
-        LASSERT(cle->ce_ref == 0);
-        LASSERT(cle->ce_magic == &cl_env_init0);
-        LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
+        if (cle) {
+                LASSERT(cle->ce_owner == NULL);
+                cle->ce_owner = cfs_current();
+                cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
+                cfs_current()->LL_TASK_CL_ENV = cle;
+        }
+}
 
-        cle->ce_ref = 1;
-        cle->ce_debug = debug;
-        CL_ENV_INC(cs_busy);
+static inline void cl_env_do_detach(struct cl_env *cle)
+{
+        LASSERT(cle->ce_owner == cfs_current());
+        LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
+        cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
+        cle->ce_owner = NULL;
+}
+
+static int cl_env_store_init(void) { return 0; }
+static void cl_env_store_fini(void) { }
+
+#endif /* LL_TASK_CL_ENV */
+
+static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+{
+        if (cle == NULL)
+                cle = cl_env_fetch();
+
+        if (cle && cle->ce_owner)
+                cl_env_do_detach(cle);
+
+        return cle;
 }
 
-static struct lu_env *cl_env_new(__u32 tags, void *debug)
+static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
 {
         struct lu_env *env;
         struct cl_env *cle;
@@ -663,9 +756,10 @@ static struct lu_env *cl_env_new(__u32 tags, void *debug)
                 CFS_INIT_LIST_HEAD(&cle->ce_linkage);
                 cle->ce_magic = &cl_env_init0;
                 env = &cle->ce_lu;
-                rc = lu_env_init(env, LCT_CL_THREAD|tags);
+                rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
                 if (rc == 0) {
-                        rc = lu_context_init(&cle->ce_ses, LCT_SESSION|tags);
+                        rc = lu_context_init(&cle->ce_ses,
+                                             LCT_SESSION | ses_tags);
                         if (rc == 0) {
                                 lu_context_enter(&cle->ce_ses);
                                 env->le_ses = &cle->ce_ses;
@@ -677,8 +771,8 @@ static struct lu_env *cl_env_new(__u32 tags, void *debug)
                         OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
                         env = ERR_PTR(rc);
                 } else {
-                        CL_ENV_INC(cs_created);
-                        CL_ENV_INC(cs_total);
+                        CL_ENV_INC(create);
+                        CL_ENV_INC(total);
                 }
         } else
                 env = ERR_PTR(-ENOMEM);
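In the LL_TASK_CL_ENV branch above, ce_prev saves whatever env was already hanging off the task, so attach/detach behave like push/pop: attaching a new env stashes the current one, and detaching restores it. A standalone sketch of that discipline (all names are invented; C11 _Thread_local stands in for the task_struct field):

    #include <assert.h>
    #include <stddef.h>

    struct demo_env {
            struct demo_env *prev;          /* plays the role of ce_prev */
    };

    static _Thread_local struct demo_env *task_cl_env; /* task field stand-in */

    static void demo_env_attach(struct demo_env *e)
    {
            e->prev = task_cl_env;          /* save the outer env, if any */
            task_cl_env = e;
    }

    static void demo_env_detach(struct demo_env *e)
    {
            assert(task_cl_env == e);       /* strict LIFO, as the LASSERTs enforce */
            task_cl_env = e->prev;          /* restore the outer env */
    }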
@@ -687,45 +781,12 @@
 static void cl_env_fini(struct cl_env *cle)
 {
-        CL_ENV_DEC(cs_total);
+        CL_ENV_DEC(total);
         lu_context_fini(&cle->ce_lu.le_ctx);
         lu_context_fini(&cle->ce_ses);
         OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
 }
 
-static struct lu_env *cl_env_obtain(void *debug)
-{
-        struct cl_env *cle;
-        struct lu_env *env;
-
-        ENTRY;
-        cfs_spin_lock(&cl_envs_guard);
-        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
-        if (cl_envs_cached_nr > 0) {
-                int rc;
-
-                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
-                cfs_list_del_init(&cle->ce_linkage);
-                cl_envs_cached_nr--;
-                cfs_spin_unlock(&cl_envs_guard);
-
-                env = &cle->ce_lu;
-                rc = lu_env_refill(env);
-                if (rc == 0) {
-                        cl_env_init0(cle, debug);
-                        lu_context_enter(&env->le_ctx);
-                        lu_context_enter(&cle->ce_ses);
-                } else {
-                        cl_env_fini(cle);
-                        env = ERR_PTR(rc);
-                }
-        } else {
-                cfs_spin_unlock(&cl_envs_guard);
-                env = cl_env_new(0, debug);
-        }
-        RETURN(env);
-}
-
 static inline struct cl_env *cl_env_container(struct lu_env *env)
 {
         return container_of(env, struct cl_env, ce_lu);
@@ -736,7 +797,7 @@ struct lu_env *cl_env_peek(int *refcheck)
         struct lu_env *env;
         struct cl_env *cle;
 
-        CL_ENV_INC(cs_lookup);
+        CL_ENV_INC(lookup);
 
         /* check that we don't go far from untrusted pointer */
         CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
@@ -744,11 +805,11 @@ struct lu_env *cl_env_peek(int *refcheck)
         env = NULL;
         cle = cl_env_fetch();
         if (cle != NULL) {
-                CL_ENV_INC(cs_hit);
+                CL_ENV_INC(hit);
                 env = &cle->ce_lu;
                 *refcheck = ++cle->ce_ref;
         }
-        CDEBUG(D_OTHER, "%i@%p\n", cle ? cle->ce_ref : 0, cle);
+        CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
         return env;
 }
 EXPORT_SYMBOL(cl_env_peek);
@@ -757,8 +818,6 @@ EXPORT_SYMBOL(cl_env_peek);
  * Returns lu_env: if there already is an environment associated with the
  * current thread, it is returned, otherwise, new environment is allocated.
  *
- * Allocations are amortized through the global cache of environments.
- *
  * \param refcheck pointer to a counter used to detect environment leaks. In
  * the usual case cl_env_get() and cl_env_put() are called in the same lexical
  * scope and pointer to the same integer is passed as \a refcheck. This is
@@ -772,14 +831,17 @@ struct lu_env *cl_env_get(int *refcheck)
 
         env = cl_env_peek(refcheck);
         if (env == NULL) {
-                env = cl_env_obtain(__builtin_return_address(0));
+                env = cl_env_new(lu_context_tags_default,
+                                 lu_session_tags_default,
+                                 __builtin_return_address(0));
+
                 if (!IS_ERR(env)) {
                         struct cl_env *cle;
 
                         cle = cl_env_container(env);
                         cl_env_attach(cle);
                         *refcheck = cle->ce_ref;
-                        CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+                        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
                 }
         }
         return env;
@@ -796,13 +858,13 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
         struct lu_env *env;
 
         LASSERT(cl_env_peek(refcheck) == NULL);
-        env = cl_env_new(tags, __builtin_return_address(0));
+        env = cl_env_new(tags, tags, __builtin_return_address(0));
         if (!IS_ERR(env)) {
                 struct cl_env *cle;
 
                 cle = cl_env_container(env);
                 *refcheck = cle->ce_ref;
-                CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+                CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
         }
         return env;
 }
@@ -816,33 +878,6 @@ static void cl_env_exit(struct cl_env *cle)
 }
 
 /**
- * Finalizes and frees a given number of cached environments. This is done to
- * (1) free some memory (not currently hooked into VM), or (2) release
- * references to modules.
- */
-unsigned cl_env_cache_purge(unsigned nr)
-{
-        struct cl_env *cle;
-
-        ENTRY;
-        cfs_spin_lock(&cl_envs_guard);
-        for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
-                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
-                cfs_list_del_init(&cle->ce_linkage);
-                LASSERT(cl_envs_cached_nr > 0);
-                cl_envs_cached_nr--;
-                cfs_spin_unlock(&cl_envs_guard);
-
-                cl_env_fini(cle);
-                cfs_spin_lock(&cl_envs_guard);
-        }
-        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
-        cfs_spin_unlock(&cl_envs_guard);
-        RETURN(nr);
-}
-EXPORT_SYMBOL(cl_env_cache_purge);
-
-/**
  * Release an environment.
  *
  * Decrement \a env reference counter. When counter drops to 0, nothing in
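With the environment cache gone, cl_env_put() (next hunk) finalizes the env as soon as the last reference drops, but the refcheck protocol documented above is unchanged. A hedged usage sketch of that protocol (demo_operation is invented; cl_env_get()/cl_env_put() are the functions in this file): the counter filled in by cl_env_get() is handed back to cl_env_put(), which LASSERTs that the reference count still matches, catching references leaked or double-released within the scope.

    static int demo_operation(void)
    {
            struct lu_env *env;
            int refcheck;

            env = cl_env_get(&refcheck);
            if (IS_ERR(env))
                    return PTR_ERR(env);

            /* ... issue clio calls that take env ... */

            cl_env_put(env, &refcheck);     /* asserts ce_ref == refcheck */
            return 0;
    }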
@@ -858,28 +893,14 @@ void cl_env_put(struct lu_env *env, int *refcheck)
 
         LASSERT(cle->ce_ref > 0);
         LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
 
-        CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
         if (--cle->ce_ref == 0) {
-                CL_ENV_DEC(cs_busy);
+                CL_ENV_DEC(busy);
                 cl_env_detach(cle);
                 cle->ce_debug = NULL;
                 cl_env_exit(cle);
-                /*
-                 * Don't bother to take a lock here.
-                 *
-                 * Return environment to the cache only when it was allocated
-                 * with the standard tags.
-                 */
-                if (cl_envs_cached_nr < cl_envs_cached_max &&
-                    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
-                    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
-                        cfs_spin_lock(&cl_envs_guard);
-                        cfs_list_add(&cle->ce_linkage, &cl_envs);
-                        cl_envs_cached_nr++;
-                        cfs_spin_unlock(&cl_envs_guard);
-                } else
-                        cl_env_fini(cle);
-        }
+                cl_env_fini(cle);
+        }
 }
 EXPORT_SYMBOL(cl_env_put);
@@ -919,7 +940,7 @@ void cl_env_implant(struct lu_env *env, int *refcheck)
 
         cl_env_attach(cle);
         cl_env_get(refcheck);
-        CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
 }
 EXPORT_SYMBOL(cl_env_implant);
@@ -932,7 +953,7 @@ void cl_env_unplant(struct lu_env *env, int *refcheck)
 
         LASSERT(cle->ce_ref > 1);
 
-        CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
 
         cl_env_detach(cle);
         cl_env_put(env, refcheck);
@@ -1139,23 +1160,36 @@ int cl_global_init(void)
 {
         int result;
 
-        cl_env_hash = cfs_hash_create("cl_env", 8, 10, &cl_env_hops,
-                                      CFS_HASH_REHASH);
-        if (cl_env_hash == NULL)
-                return -ENOMEM;
+        result = cl_env_store_init();
+        if (result)
+                return result;
 
         result = lu_kmem_init(cl_object_caches);
-        if (result == 0) {
-                LU_CONTEXT_KEY_INIT(&cl_key);
-                result = lu_context_key_register(&cl_key);
-                if (result == 0) {
-                        result = cl_lock_init();
-                        if (result == 0)
-                                result = cl_page_init();
-                }
-        }
         if (result)
-                cfs_hash_putref(cl_env_hash);
+                goto out_store;
+
+        LU_CONTEXT_KEY_INIT(&cl_key);
+        result = lu_context_key_register(&cl_key);
+        if (result)
+                goto out_kmem;
+
+        result = cl_lock_init();
+        if (result)
+                goto out_context;
+
+        result = cl_page_init();
+        if (result)
+                goto out_lock;
+
+        return 0;
+out_lock:
+        cl_lock_fini();
+out_context:
+        lu_context_key_degister(&cl_key);
+out_kmem:
+        lu_kmem_fini(cl_object_caches);
+out_store:
+        cl_env_store_fini();
         return result;
 }
@@ -1168,5 +1202,5 @@ void cl_global_fini(void)
         cl_page_fini();
         lu_context_key_degister(&cl_key);
         lu_kmem_fini(cl_object_caches);
-        cfs_hash_putref(cl_env_hash);
+        cl_env_store_fini();
 }
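The rewritten cl_global_init() above replaces nested if (result == 0) blocks with the kernel's staged-init idiom: each successful step gains an unwind label, and a failure releases exactly the steps already completed, in reverse order. A generic sketch of the idiom with invented step names:

    int step_a_init(void);  void step_a_fini(void);
    int step_b_init(void);  void step_b_fini(void);
    int step_c_init(void);  void step_c_fini(void);

    static int demo_global_init(void)
    {
            int rc;

            rc = step_a_init();
            if (rc)
                    return rc;              /* nothing to unwind yet */

            rc = step_b_init();
            if (rc)
                    goto out_a;

            rc = step_c_init();
            if (rc)
                    goto out_b;

            return 0;

    out_b:                                  /* unwind in reverse order */
            step_b_fini();
    out_a:
            step_a_fini();
            return rc;
    }

The matching cl_global_fini() then performs the same teardown unconditionally, in the same reverse order as the error path.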