X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_object.c;h=5a2da3ee2eca97cd822f372961497864061225cb;hb=e27a7ff8f337984a4e7f8d31e419528b71c3777b;hp=1d6572d86098005ca10a6013156d5cc0eeb84ff5;hpb=56293e79aec17670718dcaf50a733ec5119210d2;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c
index 1d6572d..5a2da3e 100644
--- a/lustre/obdclass/cl_object.c
+++ b/lustre/obdclass/cl_object.c
@@ -76,6 +76,8 @@ static cfs_lock_class_key_t cl_lock_guard_class;
 /** Lock class of cl_object_header::coh_attr_guard */
 static cfs_lock_class_key_t cl_attr_guard_class;
 
+extern __u32 lu_context_tags_default;
+extern __u32 lu_session_tags_default;
 /**
  * Initialize cl_object_header.
  */
@@ -519,6 +521,24 @@ EXPORT_SYMBOL(cl_site_stats_print);
  *
  */
 
+/**
+ * The most efficient way is to store the cl_env pointer in task-specific
+ * structures. On Linux, it won't be easy to use task_struct->journal_info
+ * because Lustre code may call into other filesystems that have their own
+ * assumptions about journal_info. Currently the following fields in
+ * task_struct have been identified as usable for this purpose:
+ *  - cl_env: for liblustre.
+ *  - tux_info: only on RedHat kernels.
+ *  - ...
+ * \note As long as we use task_struct to store cl_env, we assume that once
+ * we have been called into Lustre, we never call into other parts of the
+ * kernel that use those fields in task_struct without explicitly exiting
+ * Lustre.
+ *
+ * If no space in task_struct is available, a hash will be used.
+ * bz20044, bz22683.
+ */
+
 static CFS_LIST_HEAD(cl_envs);
 static unsigned cl_envs_cached_nr  = 0;
 static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
@@ -529,19 +549,29 @@ struct cl_env {
         void             *ce_magic;
         struct lu_env     ce_lu;
         struct lu_context ce_ses;
+
+#ifdef LL_TASK_CL_ENV
+        void             *ce_prev;
+#else
         /**
          * This allows cl_env to be entered into cl_env_hash which implements
          * the current thread -> client environment lookup.
          */
         cfs_hlist_node_t  ce_node;
+#endif
         /**
-         * Owner for the current cl_env, the key for cfs_hash.
+         * Owner for the current cl_env.
+         *
+         * If LL_TASK_CL_ENV is defined, this points to the owning
+         * cfs_current(), for debugging purposes only;
+         * otherwise a hash is used, and this is the key for cfs_hash.
          * Now current thread pid is stored. Note using thread pointer would
          * lead to unbalanced hash because of its specific allocation locality
          * and could be varied for different platforms and OSes, even different
          * OS versions.
          */
         void             *ce_owner;
+
         /*
          * Linkage into global list of all client environments. Used for
          * garbage collection.
@@ -566,16 +596,27 @@ struct cl_env {
         cfs_atomic_dec(&cl_env_stats.counter);                          \
 } while (0)
 
-/*****************************************************************************
- * Routins to use cfs_hash functionality to bind the current thread
- * to cl_env
+static void cl_env_init0(struct cl_env *cle, void *debug)
+{
+        LASSERT(cle->ce_ref == 0);
+        LASSERT(cle->ce_magic == &cl_env_init0);
+        LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
+
+        cle->ce_ref = 1;
+        cle->ce_debug = debug;
+        CL_ENV_INC(cs_busy);
+}
+
+
+#ifndef LL_TASK_CL_ENV
+/*
+ * The implementation that uses a hash table to connect cl_env and thread
  */
 
-/** lustre hash to manage the cl_env for current thread */
 static cfs_hash_t *cl_env_hash;
-static void cl_env_init0(struct cl_env *cle, void *debug);
 
-static unsigned cl_env_hops_hash(cfs_hash_t *lh, void *key, unsigned mask)
+static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+                                 const void *key, unsigned mask)
 {
 #if BITS_PER_LONG == 64
         return cfs_hash_u64_hash((__u64)key, mask);
@@ -591,7 +632,7 @@ static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
         return (void *)cle;
 }
 
-static int cl_env_hops_compare(void *key, cfs_hlist_node_t *hn)
+static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
 {
         struct cl_env *cle = cl_env_hops_obj(hn);
 
@@ -599,17 +640,25 @@ static int cl_env_hops_compare(void *key, cfs_hlist_node_t *hn)
         return (key == cle->ce_owner);
 }
 
+static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
+{
+        struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
+        LASSERT(cle->ce_magic == &cl_env_init0);
+}
+
 static cfs_hash_ops_t cl_env_hops = {
-        .hs_hash    = cl_env_hops_hash,
-        .hs_compare = cl_env_hops_compare,
-        .hs_key     = cl_env_hops_obj,
-        .hs_get     = cl_env_hops_obj,
-        .hs_put     = cl_env_hops_obj,
+        .hs_hash       = cl_env_hops_hash,
+        .hs_key        = cl_env_hops_obj,
+        .hs_keycmp     = cl_env_hops_keycmp,
+        .hs_object     = cl_env_hops_obj,
+        .hs_get        = cl_env_hops_noop,
+        .hs_put_locked = cl_env_hops_noop,
 };
 
 static inline struct cl_env *cl_env_fetch(void)
 {
         struct cl_env *cle;
+
         cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
         LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
         return cle;
@@ -619,6 +668,7 @@ static inline void cl_env_attach(struct cl_env *cle)
 {
         if (cle) {
                 int rc;
+
                 LASSERT(cle->ce_owner == NULL);
                 cle->ce_owner = (void *) (long) cfs_current()->pid;
                 rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
@@ -627,34 +677,82 @@ static inline void cl_env_attach(struct cl_env *cle)
         }
 }
 
-static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        if (cle == NULL)
-                cle = cl_env_fetch();
-        if (cle && cle->ce_owner) {
-                void *cookie;
-                LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
-                cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
-                                      &cle->ce_node);
-                cle->ce_owner = NULL;
-                LASSERT(cookie == cle);
-        }
+        void *cookie;
+
+        LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
+        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+                              &cle->ce_node);
+        LASSERT(cookie == cle);
+        cle->ce_owner = NULL;
+}
+
+static int cl_env_store_init(void) {
+        cl_env_hash = cfs_hash_create("cl_env",
+                                      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
+                                      HASH_CL_ENV_BKT_BITS, 0,
+                                      CFS_HASH_MIN_THETA,
+                                      CFS_HASH_MAX_THETA,
+                                      &cl_env_hops,
+                                      CFS_HASH_RW_BKTLOCK);
+        return cl_env_hash != NULL ? 0 : -ENOMEM;
+}
+
+static void cl_env_store_fini(void) {
+        cfs_hash_putref(cl_env_hash);
+}
+
+#else /* LL_TASK_CL_ENV */
+/*
+ * The implementation that stores cl_env directly in the thread structure.
+ */
+
+static inline struct cl_env *cl_env_fetch(void)
+{
+        struct cl_env *cle;
+
+        cle = cfs_current()->LL_TASK_CL_ENV;
+        if (cle && cle->ce_magic != &cl_env_init0)
+                cle = NULL;
         return cle;
 }
-/* ----------------------- hash routines end ---------------------------- */
 
-static void cl_env_init0(struct cl_env *cle, void *debug)
+static inline void cl_env_attach(struct cl_env *cle)
 {
-        LASSERT(cle->ce_ref == 0);
-        LASSERT(cle->ce_magic == &cl_env_init0);
-        LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
+        if (cle) {
+                LASSERT(cle->ce_owner == NULL);
+                cle->ce_owner = cfs_current();
+                cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
+                cfs_current()->LL_TASK_CL_ENV = cle;
+        }
+}
 
-        cle->ce_ref = 1;
-        cle->ce_debug = debug;
-        CL_ENV_INC(cs_busy);
+static inline void cl_env_do_detach(struct cl_env *cle)
+{
+        LASSERT(cle->ce_owner == cfs_current());
+        LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
+        cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
+        cle->ce_owner = NULL;
+}
+
+static int cl_env_store_init(void) { return 0; }
+static void cl_env_store_fini(void) { }
+
+#endif /* LL_TASK_CL_ENV */
+
+static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+{
+        if (cle == NULL)
+                cle = cl_env_fetch();
+
+        if (cle && cle->ce_owner)
+                cl_env_do_detach(cle);
+
+        return cle;
 }
 
-static struct lu_env *cl_env_new(__u32 tags, void *debug)
+static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
 {
         struct lu_env *env;
         struct cl_env *cle;
@@ -666,9 +764,10 @@ static struct lu_env *cl_env_new(__u32 tags, void *debug)
                 CFS_INIT_LIST_HEAD(&cle->ce_linkage);
                 cle->ce_magic = &cl_env_init0;
                 env = &cle->ce_lu;
-                rc = lu_env_init(env, LCT_CL_THREAD|tags);
+                rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
                 if (rc == 0) {
-                        rc = lu_context_init(&cle->ce_ses, LCT_SESSION|tags);
+                        rc = lu_context_init(&cle->ce_ses,
+                                             LCT_SESSION | ses_tags);
                         if (rc == 0) {
                                 lu_context_enter(&cle->ce_ses);
                                 env->le_ses = &cle->ce_ses;
@@ -724,7 +823,8 @@ static struct lu_env *cl_env_obtain(void *debug)
                 }
         } else {
                 cfs_spin_unlock(&cl_envs_guard);
-                env = cl_env_new(0, debug);
+                env = cl_env_new(lu_context_tags_default,
+                                 lu_session_tags_default, debug);
         }
         RETURN(env);
 }
@@ -799,7 +899,7 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
         struct lu_env *env;
 
         LASSERT(cl_env_peek(refcheck) == NULL);
-        env = cl_env_new(tags, __builtin_return_address(0));
+        env = cl_env_new(tags, tags, __builtin_return_address(0));
         if (!IS_ERR(env)) {
                 struct cl_env *cle;
 
@@ -1142,23 +1242,36 @@ int cl_global_init(void)
 {
         int result;
 
-        cl_env_hash = cfs_hash_create("cl_env", 8, 10, &cl_env_hops,
-                                      CFS_HASH_REHASH);
-        if (cl_env_hash == NULL)
-                return -ENOMEM;
+        result = cl_env_store_init();
+        if (result)
+                return result;
 
         result = lu_kmem_init(cl_object_caches);
-        if (result == 0) {
-                LU_CONTEXT_KEY_INIT(&cl_key);
-                result = lu_context_key_register(&cl_key);
-                if (result == 0) {
-                        result = cl_lock_init();
-                        if (result == 0)
-                                result = cl_page_init();
-                }
-        }
         if (result)
-                cfs_hash_putref(cl_env_hash);
+                goto out_store;
+
+        LU_CONTEXT_KEY_INIT(&cl_key);
+        result = lu_context_key_register(&cl_key);
+        if (result)
+                goto out_kmem;
+
+        result = cl_lock_init();
+        if (result)
+                goto out_context;
+
+        result = cl_page_init();
+        if (result)
+                goto out_lock;
+
+        return 0;
+out_lock:
+        cl_lock_fini();
+out_context:
+        lu_context_key_degister(&cl_key);
+out_kmem:
+        lu_kmem_fini(cl_object_caches);
+out_store:
+        cl_env_store_fini();
         return result;
 }
 
@@ -1171,5 +1284,5 @@ void cl_global_fini(void)
         cl_page_fini();
         lu_context_key_degister(&cl_key);
         lu_kmem_fini(cl_object_caches);
-        cfs_hash_putref(cl_env_hash);
+        cl_env_store_fini();
 }
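
Not part of the patch itself: the comment block added above describes keeping the active cl_env in a task-specific slot (the LL_TASK_CL_ENV case), with the previous environment remembered in ce_prev so entries can nest, and a pid-keyed hash used as the fallback when no task_struct field is available. Below is a minimal user-space sketch of the per-task idea, using a C11 _Thread_local variable in place of a task_struct field; struct env, env_attach() and env_detach() are illustrative stand-ins, not Lustre APIs.

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative stand-ins only: the real code uses struct cl_env and a
 * field reached through cfs_current()->LL_TASK_CL_ENV.
 */
struct env {
        struct env *prev;       /* like cl_env::ce_prev: saved outer env */
        int         refs;       /* like cl_env::ce_ref                   */
};

/* One slot per thread, playing the role of the task_struct field. */
static _Thread_local struct env *task_cl_env;

/* Roughly what cl_env_attach() does in the LL_TASK_CL_ENV branch:
 * remember the previous environment and install the new one. */
static void env_attach(struct env *e)
{
        e->prev = task_cl_env;
        task_cl_env = e;
}

/* Roughly what cl_env_do_detach() does: pop the current environment
 * and restore whatever was active before it. */
static void env_detach(struct env *e)
{
        assert(task_cl_env == e);
        task_cl_env = e->prev;
        e->prev = NULL;
}

int main(void)
{
        struct env outer = { .refs = 1 };
        struct env inner = { .refs = 1 };

        env_attach(&outer);
        env_attach(&inner);             /* nested entry into "Lustre"    */
        printf("current env: %p\n", (void *)task_cl_env);
        env_detach(&inner);             /* back to the outer environment */
        env_detach(&outer);
        return 0;
}

The hash branch of the patch solves the same lookup problem when no such slot exists, keying cl_env_hash by the current pid, as cl_env_fetch() and cl_env_attach() in the #ifndef LL_TASK_CL_ENV section show.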