X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_object.c;h=8f29926b5929d0c0e2e6f011c6149384c9d99685;hp=248cccead76cba6677b6c8de36c26a21a5c839aa;hb=93fe562c5dd3829939e3bc0533918f66b19776a4;hpb=9fb46705ae86aa2c0ac29427f0ff24f923560eb7

diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c
index 248ccce..8f29926 100644
--- a/lustre/obdclass/cl_object.c
+++ b/lustre/obdclass/cl_object.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -58,13 +58,10 @@
 #include
 #include
 #include		/* for cfs_hash stuff */
-/* lu_time_global_{init,fini}() */
-#include
-
 #include
 #include "cl_internal.h"
 
-static cfs_mem_cache_t *cl_env_kmem;
+static struct kmem_cache *cl_env_kmem;
 
 /** Lock class of cl_object_header::coh_page_guard */
 static struct lock_class_key cl_page_guard_class;
@@ -95,6 +92,7 @@ int cl_object_header_init(struct cl_object_header *h)
                 /* XXX hard coded GFP_* mask. */
                 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
                 CFS_INIT_LIST_HEAD(&h->coh_locks);
+		h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
         }
         RETURN(result);
 }
@@ -122,7 +120,7 @@ struct cl_object *cl_object_find(const struct lu_env *env,
                                  struct cl_device *cd, const struct lu_fid *fid,
                                  const struct cl_object_conf *c)
 {
-        cfs_might_sleep();
+	might_sleep();
         return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
 }
 EXPORT_SYMBOL(cl_object_find);
@@ -220,11 +218,11 @@ EXPORT_SYMBOL(cl_object_attr_unlock);
 int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
 {
-        struct lu_object_header *top;
-        int result;
+	struct lu_object_header *top;
+	int result;
 
-        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
-        ENTRY;
+	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+	ENTRY;
 
         top = obj->co_lu.lo_header;
         result = 0;
@@ -252,11 +250,11 @@ EXPORT_SYMBOL(cl_object_attr_get);
 int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
                        const struct cl_attr *attr, unsigned v)
 {
-        struct lu_object_header *top;
-        int result;
+	struct lu_object_header *top;
+	int result;
 
-        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
-        ENTRY;
+	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+	ENTRY;
 
         top = obj->co_lu.lo_header;
         result = 0;
@@ -389,34 +387,37 @@ EXPORT_SYMBOL(cl_object_has_locks);
 
 void cache_stats_init(struct cache_stats *cs, const char *name)
 {
+	int i;
+
 	cs->cs_name = name;
-	cfs_atomic_set(&cs->cs_lookup, 0);
-	cfs_atomic_set(&cs->cs_hit, 0);
-	cfs_atomic_set(&cs->cs_total, 0);
-	cfs_atomic_set(&cs->cs_busy, 0);
+	for (i = 0; i < CS_NR; i++)
+		cfs_atomic_set(&cs->cs_stats[i], 0);
 }
 
 int cache_stats_print(const struct cache_stats *cs,
                       char *page, int count, int h)
 {
-        int nob = 0;
-/*
-         lookup    hit  total cached create
-  env: ...... ...... ...... ...... ......
-*/
-        if (h)
-                nob += snprintf(page, count,
-                                "  lookup    hit  total   busy create\n");
-
-        nob += snprintf(page + nob, count - nob,
-                        "%5.5s: %6u %6u %6u %6u %6u",
-                        cs->cs_name,
-                        cfs_atomic_read(&cs->cs_lookup),
-                        cfs_atomic_read(&cs->cs_hit),
-                        cfs_atomic_read(&cs->cs_total),
-                        cfs_atomic_read(&cs->cs_busy),
-                        cfs_atomic_read(&cs->cs_created));
-        return nob;
+	int nob = 0;
+	int i;
+	/*
+	 *   lookup    hit  total cached create
+	 * env: ...... ...... ...... ...... ......
+	 */
+	if (h) {
+		const char *names[CS_NR] = CS_NAMES;
+
+		nob += snprintf(page + nob, count - nob, "%6s", " ");
+		for (i = 0; i < CS_NR; i++)
+			nob += snprintf(page + nob, count - nob,
+					"%8s", names[i]);
+		nob += snprintf(page + nob, count - nob, "\n");
+	}
+
+	nob += snprintf(page + nob, count - nob, "%5.5s:", cs->cs_name);
+	for (i = 0; i < CS_NR; i++)
+		nob += snprintf(page + nob, count - nob, "%8u",
+				cfs_atomic_read(&cs->cs_stats[i]));
+	return nob;
 }
 
 /**
@@ -454,11 +455,7 @@ EXPORT_SYMBOL(cl_site_fini);
 
 static struct cache_stats cl_env_stats = {
         .cs_name    = "envs",
-        .cs_created = CFS_ATOMIC_INIT(0),
-        .cs_lookup  = CFS_ATOMIC_INIT(0),
-        .cs_hit     = CFS_ATOMIC_INIT(0),
-        .cs_total   = CFS_ATOMIC_INIT(0),
-        .cs_busy    = CFS_ATOMIC_INIT(0)
+        .cs_stats = { CFS_ATOMIC_INIT(0), }
 };
 
 /**
@@ -556,18 +553,18 @@ struct cl_env {
          */
         cfs_hlist_node_t  ce_node;
 #endif
-        /**
-         * Owner for the current cl_env.
-         *
-         * If LL_TASK_CL_ENV is defined, this point to the owning cfs_current(),
-         * only for debugging purpose ;
-         * Otherwise hash is used, and this is the key for cfs_hash.
-         * Now current thread pid is stored. Note using thread pointer would
-         * lead to unbalanced hash because of its specific allocation locality
-         * and could be varied for different platforms and OSes, even different
-         * OS versions.
-         */
-        void             *ce_owner;
+	/**
+	 * Owner for the current cl_env.
+	 *
+	 * If LL_TASK_CL_ENV is defined, this point to the owning current,
+	 * only for debugging purpose ;
+	 * Otherwise hash is used, and this is the key for cfs_hash.
+	 * Now current thread pid is stored. Note using thread pointer would
+	 * lead to unbalanced hash because of its specific allocation locality
+	 * and could be varied for different platforms and OSes, even different
+	 * OS versions.
+	 */
+	void             *ce_owner;
 
         /*
          * Linkage into global list of all client environments. Used for
@@ -585,13 +582,17 @@ struct cl_env {
         void             *ce_debug;
 };
 
-#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
 
-#define CL_ENV_DEC(counter)                                             \
-        do {                                                            \
-                LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0);    \
-                cfs_atomic_dec(&cl_env_stats.counter);                  \
-        } while (0)
+#define CL_ENV_DEC(counter) do { \
+	LASSERT(cfs_atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
+	cfs_atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
+} while (0)
+#else
+#define CL_ENV_INC(counter)
+#define CL_ENV_DEC(counter)
+#endif
 
 static void cl_env_init0(struct cl_env *cle, void *debug)
 {
@@ -601,7 +602,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
 
         cle->ce_ref = 1;
         cle->ce_debug = debug;
-        CL_ENV_INC(cs_busy);
+	CL_ENV_INC(busy);
 }
 
 
@@ -654,35 +655,35 @@ static cfs_hash_ops_t cl_env_hops = {
 
 static inline struct cl_env *cl_env_fetch(void)
 {
-        struct cl_env *cle;
+	struct cl_env *cle;
 
-        cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
-        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
-        return cle;
+	cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+	return cle;
 }
 
 static inline void cl_env_attach(struct cl_env *cle)
 {
-        if (cle) {
-                int rc;
+	if (cle) {
+		int rc;
 
-                LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = (void *) (long) cfs_current()->pid;
-                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
-                                         &cle->ce_node);
-                LASSERT(rc == 0);
-        }
+		LASSERT(cle->ce_owner == NULL);
+		cle->ce_owner = (void *) (long) current->pid;
+		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
+					 &cle->ce_node);
+		LASSERT(rc == 0);
+	}
 }
 
 static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        void *cookie;
+	void *cookie;
 
-        LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
-        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
-                              &cle->ce_node);
-        LASSERT(cookie == cle);
-        cle->ce_owner = NULL;
+	LASSERT(cle->ce_owner == (void *) (long) current->pid);
+	cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+			      &cle->ce_node);
+	LASSERT(cookie == cle);
+	cle->ce_owner = NULL;
 }
 
 static int cl_env_store_init(void) {
@@ -707,30 +708,30 @@ static void cl_env_store_fini(void) {
 
 static inline struct cl_env *cl_env_fetch(void)
 {
-        struct cl_env *cle;
+	struct cl_env *cle;
 
-        cle = cfs_current()->LL_TASK_CL_ENV;
-        if (cle && cle->ce_magic != &cl_env_init0)
-                cle = NULL;
-        return cle;
+	cle = current->LL_TASK_CL_ENV;
+	if (cle && cle->ce_magic != &cl_env_init0)
+		cle = NULL;
+	return cle;
 }
 
 static inline void cl_env_attach(struct cl_env *cle)
 {
-        if (cle) {
-                LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = cfs_current();
-                cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
-                cfs_current()->LL_TASK_CL_ENV = cle;
-        }
+	if (cle) {
+		LASSERT(cle->ce_owner == NULL);
+		cle->ce_owner = current;
+		cle->ce_prev = current->LL_TASK_CL_ENV;
+		current->LL_TASK_CL_ENV = cle;
+	}
 }
 
 static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        LASSERT(cle->ce_owner == cfs_current());
-        LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
-        cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
-        cle->ce_owner = NULL;
+	LASSERT(cle->ce_owner == current);
+	LASSERT(current->LL_TASK_CL_ENV == cle);
+	current->LL_TASK_CL_ENV = cle->ce_prev;
+	cle->ce_owner = NULL;
 }
 
 static int cl_env_store_init(void) { return 0; }
@@ -754,7 +755,7 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
         struct lu_env *env;
         struct cl_env *cle;
 
-        OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO);
+	OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO);
         if (cle != NULL) {
                 int rc;
 
@@ -776,8 +777,8 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
                         OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
                         env = ERR_PTR(rc);
                 } else {
-                        CL_ENV_INC(cs_created);
-                        CL_ENV_INC(cs_total);
+			CL_ENV_INC(create);
+			CL_ENV_INC(total);
                 }
         } else
                 env = ERR_PTR(-ENOMEM);
@@ -786,7 +787,7 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
 
 static void cl_env_fini(struct cl_env *cle)
 {
-        CL_ENV_DEC(cs_total);
+	CL_ENV_DEC(total);
         lu_context_fini(&cle->ce_lu.le_ctx);
         lu_context_fini(&cle->ce_ses);
         OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
@@ -836,7 +837,7 @@ struct lu_env *cl_env_peek(int *refcheck)
         struct lu_env *env;
         struct cl_env *cle;
 
-        CL_ENV_INC(cs_lookup);
+	CL_ENV_INC(lookup);
 
         /* check that we don't go far from untrusted pointer */
         CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
@@ -844,7 +845,7 @@ struct lu_env *cl_env_peek(int *refcheck)
         env = NULL;
         cle = cl_env_fetch();
         if (cle != NULL) {
-                CL_ENV_INC(cs_hit);
+		CL_ENV_INC(hit);
                 env = &cle->ce_lu;
                 *refcheck = ++cle->ce_ref;
         }
@@ -960,7 +961,7 @@ void cl_env_put(struct lu_env *env, int *refcheck)
 
         CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
         if (--cle->ce_ref == 0) {
-                CL_ENV_DEC(cs_busy);
+		CL_ENV_DEC(busy);
                 cl_env_detach(cle);
                 cle->ce_debug = NULL;
                 cl_env_exit(cle);
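
Note on the recurring change above: the per-counter cache_stats fields are replaced by an array indexed with CS_* constants, so CL_ENV_INC()/CL_ENV_DEC() can token-paste the counter name into an index and the whole accounting can be compiled out unless CONFIG_DEBUG_PAGESTATE_TRACKING is set. The snippet below is a minimal standalone sketch of that pattern, not part of the patch: it uses plain C11 atomics in userspace, and the enum values and name table are illustrative assumptions mirroring the identifiers used in the hunks, not the Lustre header definitions.

/* Standalone sketch: enum-indexed counter array with token-pasting macros. */
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical indices/names mirroring the CS_* identifiers used above. */
enum { CS_lookup, CS_hit, CS_total, CS_busy, CS_create, CS_NR };
static const char *cs_names[CS_NR] = { "lookup", "hit", "total", "busy", "create" };
static atomic_uint cs_stats[CS_NR];

/* Token-paste the counter name into an array index, as the reworked
 * CL_ENV_INC()/CL_ENV_DEC() macros do. */
#define STATS_INC(counter) atomic_fetch_add(&cs_stats[CS_##counter], 1)
#define STATS_DEC(counter) atomic_fetch_sub(&cs_stats[CS_##counter], 1)

int main(void)
{
	int i;

	STATS_INC(lookup);	/* one lookup ...          */
	STATS_INC(hit);		/* ... that hit the cache  */
	STATS_INC(busy);
	STATS_DEC(busy);

	/* Same row layout as the reworked cache_stats_print(): a header of
	 * %8s column names, then "name:" followed by one %8u value each. */
	printf("%6s", " ");
	for (i = 0; i < CS_NR; i++)
		printf("%8s", cs_names[i]);
	printf("\n%5.5s:", "envs");
	for (i = 0; i < CS_NR; i++)
		printf("%8u", atomic_load(&cs_stats[i]));
	printf("\n");
	return 0;
}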