X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_object.c;h=357e73f566c04d5d76f2d54ad2f7d76fdb66d7cf;hp=5a2da3ee2eca97cd822f372961497864061225cb;hb=8701e7e4b5ec1b34700c95b9b6588f4745730b72;hpb=e27a7ff8f337984a4e7f8d31e419528b71c3777b;ds=sidebyside diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c index 5a2da3e..357e73f 100644 --- a/lustre/obdclass/cl_object.c +++ b/lustre/obdclass/cl_object.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,6 +26,8 @@ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -36,6 +36,7 @@ * Client Lustre Object. * * Author: Nikita Danilov + * Author: Jinshan Xiong */ /* @@ -43,16 +44,12 @@ * * i_mutex * PG_locked - * ->coh_page_guard * ->coh_lock_guard * ->coh_attr_guard * ->ls_guard */ #define DEBUG_SUBSYSTEM S_CLASS -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif #include /* class_put_type() */ @@ -61,20 +58,15 @@ #include #include #include /* for cfs_hash stuff */ -/* lu_time_global_{init,fini}() */ -#include - #include #include "cl_internal.h" -static cfs_mem_cache_t *cl_env_kmem; +static struct kmem_cache *cl_env_kmem; -/** Lock class of cl_object_header::coh_page_guard */ -static cfs_lock_class_key_t cl_page_guard_class; /** Lock class of cl_object_header::coh_lock_guard */ -static cfs_lock_class_key_t cl_lock_guard_class; +static struct lock_class_key cl_lock_guard_class; /** Lock class of cl_object_header::coh_attr_guard */ -static cfs_lock_class_key_t cl_attr_guard_class; +static struct lock_class_key cl_attr_guard_class; extern __u32 lu_context_tags_default; extern __u32 lu_session_tags_default; @@ -83,23 +75,19 @@ extern __u32 lu_session_tags_default; */ int cl_object_header_init(struct cl_object_header *h) { - int result; - - ENTRY; - result = lu_object_header_init(&h->coh_lu); - if (result == 0) { - cfs_spin_lock_init(&h->coh_page_guard); - cfs_spin_lock_init(&h->coh_lock_guard); - cfs_spin_lock_init(&h->coh_attr_guard); - cfs_lockdep_set_class(&h->coh_attr_guard, &cl_page_guard_class); - cfs_lockdep_set_class(&h->coh_attr_guard, &cl_lock_guard_class); - cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class); - h->coh_pages = 0; - /* XXX hard coded GFP_* mask. 
*/ - INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC); - CFS_INIT_LIST_HEAD(&h->coh_locks); - } - RETURN(result); + int result; + + ENTRY; + result = lu_object_header_init(&h->coh_lu); + if (result == 0) { + spin_lock_init(&h->coh_lock_guard); + spin_lock_init(&h->coh_attr_guard); + lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class); + lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class); + INIT_LIST_HEAD(&h->coh_locks); + h->coh_page_bufsize = 0; + } + RETURN(result); } EXPORT_SYMBOL(cl_object_header_init); @@ -108,7 +96,7 @@ EXPORT_SYMBOL(cl_object_header_init); */ void cl_object_header_fini(struct cl_object_header *h) { - LASSERT(cfs_list_empty(&h->coh_locks)); + LASSERT(list_empty(&h->coh_locks)); lu_object_header_fini(&h->coh_lu); } EXPORT_SYMBOL(cl_object_header_fini); @@ -125,7 +113,7 @@ struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd, const struct lu_fid *fid, const struct cl_object_conf *c) { - cfs_might_sleep(); + might_sleep(); return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu)); } EXPORT_SYMBOL(cl_object_find); @@ -161,7 +149,7 @@ EXPORT_SYMBOL(cl_object_get); /** * Returns the top-object for a given \a o. * - * \see cl_page_top(), cl_io_top() + * \see cl_io_top() */ struct cl_object *cl_object_top(struct cl_object *o) { @@ -186,9 +174,9 @@ EXPORT_SYMBOL(cl_object_top); * * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get(). */ -static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o) +static spinlock_t *cl_object_attr_guard(struct cl_object *o) { - return &cl_object_header(cl_object_top(o))->coh_attr_guard; + return &cl_object_header(cl_object_top(o))->coh_attr_guard; } /** @@ -200,7 +188,7 @@ static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o) */ void cl_object_attr_lock(struct cl_object *o) { - cfs_spin_lock(cl_object_attr_guard(o)); + spin_lock(cl_object_attr_guard(o)); } EXPORT_SYMBOL(cl_object_attr_lock); @@ -209,7 +197,7 @@ EXPORT_SYMBOL(cl_object_attr_lock); */ void cl_object_attr_unlock(struct cl_object *o) { - cfs_spin_unlock(cl_object_attr_guard(o)); + spin_unlock(cl_object_attr_guard(o)); } EXPORT_SYMBOL(cl_object_attr_unlock); @@ -223,15 +211,15 @@ EXPORT_SYMBOL(cl_object_attr_unlock); int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, struct cl_attr *attr) { - struct lu_object_header *top; - int result; + struct lu_object_header *top; + int result; - LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj)); - ENTRY; + assert_spin_locked(cl_object_attr_guard(obj)); + ENTRY; top = obj->co_lu.lo_header; result = 0; - cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { + list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { if (obj->co_ops->coo_attr_get != NULL) { result = obj->co_ops->coo_attr_get(env, obj, attr); if (result != 0) { @@ -255,26 +243,25 @@ EXPORT_SYMBOL(cl_object_attr_get); int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, const struct cl_attr *attr, unsigned v) { - struct lu_object_header *top; - int result; - - LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj)); - ENTRY; - - top = obj->co_lu.lo_header; - result = 0; - cfs_list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_set != NULL) { - result = obj->co_ops->coo_attr_set(env, obj, attr, v); - if (result != 0) { - if (result > 0) - result = 0; - break; - } - } - } - RETURN(result); + struct lu_object_header *top; + int result; + + assert_spin_locked(cl_object_attr_guard(obj)); + ENTRY; + + top = 
obj->co_lu.lo_header; + result = 0; + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_attr_set != NULL) { + result = obj->co_ops->coo_attr_set(env, obj, attr, v); + if (result != 0) { + if (result > 0) + result = 0; + break; + } + } + } + RETURN(result); } EXPORT_SYMBOL(cl_object_attr_set); @@ -295,8 +282,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, ENTRY; top = obj->co_lu.lo_header; result = 0; - cfs_list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { if (obj->co_ops->coo_glimpse != NULL) { result = obj->co_ops->coo_glimpse(env, obj, lvb); if (result != 0) @@ -324,7 +310,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj, ENTRY; top = obj->co_lu.lo_header; result = 0; - cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { + list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { if (obj->co_ops->coo_conf_set != NULL) { result = obj->co_ops->coo_conf_set(env, obj, conf); if (result != 0) @@ -336,6 +322,33 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj, EXPORT_SYMBOL(cl_conf_set); /** + * Prunes caches of pages and locks for this object. + */ +void cl_object_prune(const struct lu_env *env, struct cl_object *obj) +{ + struct lu_object_header *top; + struct cl_object *o; + int result; + ENTRY; + + top = obj->co_lu.lo_header; + result = 0; + list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) { + if (o->co_ops->coo_prune != NULL) { + result = o->co_ops->coo_prune(env, o); + if (result != 0) + break; + } + } + + /* TODO: pruning locks will be moved into layers after cl_lock + * simplification is done */ + cl_locks_prune(env, obj, 1); + EXIT; +} +EXPORT_SYMBOL(cl_object_prune); + +/** * Helper function removing all object locks, and marking object for * deletion. All object pages must have been deleted at this point. * @@ -347,10 +360,8 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj) struct cl_object_header *hdr; hdr = cl_object_header(obj); - LASSERT(hdr->coh_tree.rnode == NULL); - LASSERT(hdr->coh_pages == 0); - cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags); + set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags); /* * Destroy all locks. Object destruction (including cl_inode_fini()) * cannot cancel the locks, because in the case of a local client, @@ -363,65 +374,55 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj) EXPORT_SYMBOL(cl_object_kill); /** - * Prunes caches of pages and locks for this object. - */ -void cl_object_prune(const struct lu_env *env, struct cl_object *obj) -{ - ENTRY; - cl_pages_prune(env, obj); - cl_locks_prune(env, obj, 1); - EXIT; -} -EXPORT_SYMBOL(cl_object_prune); - -/** * Check if the object has locks. 
 */
int cl_object_has_locks(struct cl_object *obj)
{
-        struct cl_object_header *head = cl_object_header(obj);
-        int has;
+	struct cl_object_header *head = cl_object_header(obj);
+	int has;
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        has = cfs_list_empty(&head->coh_locks);
-        cfs_spin_unlock(&head->coh_lock_guard);
+	spin_lock(&head->coh_lock_guard);
+	has = list_empty(&head->coh_locks);
+	spin_unlock(&head->coh_lock_guard);
 
-        return (has == 0);
+	return (has == 0);
 }
 EXPORT_SYMBOL(cl_object_has_locks);
 
 void cache_stats_init(struct cache_stats *cs, const char *name)
 {
+	int i;
+
 	cs->cs_name = name;
-        cfs_atomic_set(&cs->cs_lookup, 0);
-        cfs_atomic_set(&cs->cs_hit, 0);
-        cfs_atomic_set(&cs->cs_total, 0);
-        cfs_atomic_set(&cs->cs_busy, 0);
+	for (i = 0; i < CS_NR; i++)
+		atomic_set(&cs->cs_stats[i], 0);
 }
 
-int cache_stats_print(const struct cache_stats *cs,
-                      char *page, int count, int h)
+int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h)
 {
-	int nob = 0;
-/*
-        lookup    hit  total cached create
-  env: ...... ...... ...... ...... ......
-*/
-	if (h)
-		nob += snprintf(page, count,
-				"       lookup    hit  total   busy create\n");
-
-	nob += snprintf(page + nob, count - nob,
-			"%5.5s: %6u %6u %6u %6u %6u",
-			cs->cs_name,
-			cfs_atomic_read(&cs->cs_lookup),
-			cfs_atomic_read(&cs->cs_hit),
-			cfs_atomic_read(&cs->cs_total),
-			cfs_atomic_read(&cs->cs_busy),
-			cfs_atomic_read(&cs->cs_created));
-	return nob;
+	int i;
+
+	/*
+	 *       lookup    hit  total cached create
+	 * env:  ...... ...... ...... ...... ......
+	 */
+	if (h) {
+		const char *names[CS_NR] = CS_NAMES;
+
+		seq_printf(m, "%6s", " ");
+		for (i = 0; i < CS_NR; i++)
+			seq_printf(m, "%8s", names[i]);
+		seq_printf(m, "\n");
+	}
+
+	seq_printf(m, "%5.5s:", cs->cs_name);
+	for (i = 0; i < CS_NR; i++)
+		seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
+	return 0;
 }
 
+static void cl_env_percpu_refill(void);
+
 /**
  * Initialize client site.
  *
@@ -438,11 +439,12 @@ int cl_site_init(struct cl_site *s, struct cl_device *d)
 	cache_stats_init(&s->cs_pages, "pages");
 	cache_stats_init(&s->cs_locks, "locks");
 	for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
-                cfs_atomic_set(&s->cs_pages_state[0], 0);
+		atomic_set(&s->cs_pages_state[i], 0);
 	for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
-		cfs_atomic_set(&s->cs_locks_state[i], 0);
-	}
-	return result;
+		atomic_set(&s->cs_locks_state[i], 0);
+	cl_env_percpu_refill();
+	}
+	return result;
 }
 EXPORT_SYMBOL(cl_site_init);
@@ -457,61 +459,55 @@ EXPORT_SYMBOL(cl_site_fini);
 
 static struct cache_stats cl_env_stats = {
 	.cs_name    = "envs",
-        .cs_created = CFS_ATOMIC_INIT(0),
-        .cs_lookup  = CFS_ATOMIC_INIT(0),
-        .cs_hit     = CFS_ATOMIC_INIT(0),
-        .cs_total   = CFS_ATOMIC_INIT(0),
-        .cs_busy    = CFS_ATOMIC_INIT(0)
+	.cs_stats = { ATOMIC_INIT(0), }
 };
 
 /**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
*/ -int cl_site_stats_print(const struct cl_site *site, char *page, int count) +int cl_site_stats_print(const struct cl_site *site, struct seq_file *m) { - int nob; - int i; - static const char *pstate[] = { - [CPS_CACHED] = "c", - [CPS_OWNED] = "o", - [CPS_PAGEOUT] = "w", - [CPS_PAGEIN] = "r", - [CPS_FREEING] = "f" - }; - static const char *lstate[] = { - [CLS_NEW] = "n", - [CLS_QUEUING] = "q", - [CLS_ENQUEUED] = "e", - [CLS_HELD] = "h", - [CLS_INTRANSIT] = "t", - [CLS_CACHED] = "c", - [CLS_FREEING] = "f" - }; + static const char *pstate[] = { + [CPS_CACHED] = "c", + [CPS_OWNED] = "o", + [CPS_PAGEOUT] = "w", + [CPS_PAGEIN] = "r", + [CPS_FREEING] = "f" + }; + static const char *lstate[] = { + [CLS_NEW] = "n", + [CLS_QUEUING] = "q", + [CLS_ENQUEUED] = "e", + [CLS_HELD] = "h", + [CLS_INTRANSIT] = "t", + [CLS_CACHED] = "c", + [CLS_FREEING] = "f" + }; + int i; + /* lookup hit total busy create pages: ...... ...... ...... ...... ...... [...... ...... ...... ......] locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......] env: ...... ...... ...... ...... ...... */ - nob = lu_site_stats_print(&site->cs_lu, page, count); - nob += cache_stats_print(&site->cs_pages, page + nob, count - nob, 1); - nob += snprintf(page + nob, count - nob, " ["); - for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i) - nob += snprintf(page + nob, count - nob, "%s: %u ", - pstate[i], - cfs_atomic_read(&site->cs_pages_state[i])); - nob += snprintf(page + nob, count - nob, "]\n"); - nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0); - nob += snprintf(page + nob, count - nob, " ["); - for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i) - nob += snprintf(page + nob, count - nob, "%s: %u ", - lstate[i], - cfs_atomic_read(&site->cs_locks_state[i])); - nob += snprintf(page + nob, count - nob, "]\n"); - nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0); - nob += snprintf(page + nob, count - nob, "\n"); - return nob; + lu_site_stats_seq_print(&site->cs_lu, m); + cache_stats_print(&site->cs_pages, m, 1); + seq_printf(m, " ["); + for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i) + seq_printf(m, "%s: %u ", pstate[i], + atomic_read(&site->cs_pages_state[i])); + seq_printf(m, "]\n"); + cache_stats_print(&site->cs_locks, m, 0); + seq_printf(m, " ["); + for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i) + seq_printf(m, "%s: %u ", lstate[i], + atomic_read(&site->cs_locks_state[i])); + seq_printf(m, "]\n"); + cache_stats_print(&cl_env_stats, m, 0); + seq_printf(m, "\n"); + return 0; } EXPORT_SYMBOL(cl_site_stats_print); @@ -539,11 +535,11 @@ EXPORT_SYMBOL(cl_site_stats_print); * bz20044, bz22683. */ -static CFS_LIST_HEAD(cl_envs); +static struct list_head cl_envs; static unsigned cl_envs_cached_nr = 0; static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit * for now. */ -static cfs_spinlock_t cl_envs_guard = CFS_SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(cl_envs_guard); struct cl_env { void *ce_magic; @@ -557,26 +553,26 @@ struct cl_env { * This allows cl_env to be entered into cl_env_hash which implements * the current thread -> client environment lookup. */ - cfs_hlist_node_t ce_node; + struct hlist_node ce_node; #endif - /** - * Owner for the current cl_env. - * - * If LL_TASK_CL_ENV is defined, this point to the owning cfs_current(), - * only for debugging purpose ; - * Otherwise hash is used, and this is the key for cfs_hash. - * Now current thread pid is stored. 
Note using thread pointer would
-         * lead to unbalanced hash because of its specific allocation locality
-         * and could be varied for different platforms and OSes, even different
-         * OS versions.
-         */
-        void *ce_owner;
+	/**
+	 * Owner for the current cl_env.
+	 *
+	 * If LL_TASK_CL_ENV is defined, this points to the owning task
+	 * (current), for debugging purposes only;
+	 * otherwise a hash is used, and this is the key for cfs_hash.
+	 * The current thread pid is stored now. Note that using the thread
+	 * pointer instead would lead to an unbalanced hash because of its
+	 * specific allocation locality, which can vary across platforms
+	 * and OSes, even across OS versions.
+	 */
+	void *ce_owner;
 	/*
 	 * Linkage into global list of all client environments. Used for
 	 * garbage collection.
 	 */
-        cfs_list_t ce_linkage;
+	struct list_head ce_linkage;
 	/*
 	 *
 	 */
@@ -588,13 +584,17 @@ struct cl_env {
 	void *ce_debug;
 };
 
-#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
 
-#define CL_ENV_DEC(counter)                                             \
-        do {                                                            \
-                LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0);    \
-                cfs_atomic_dec(&cl_env_stats.counter);                  \
-        } while (0)
+#define CL_ENV_DEC(counter) do { \
+	LASSERT(atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
+	atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
+} while (0)
+#else
+#define CL_ENV_INC(counter)
+#define CL_ENV_DEC(counter)
+#endif
 
 static void cl_env_init0(struct cl_env *cle, void *debug)
 {
@@ -604,7 +604,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
 	cle->ce_ref = 1;
 	cle->ce_debug = debug;
 
-        CL_ENV_INC(cs_busy);
+	CL_ENV_INC(busy);
 }
 
 
@@ -625,14 +625,15 @@ static unsigned cl_env_hops_hash(cfs_hash_t *lh,
 #endif
 }
 
-static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
+static void *cl_env_hops_obj(struct hlist_node *hn)
 {
-        struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
-        LASSERT(cle->ce_magic == &cl_env_init0);
-        return (void *)cle;
+	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
+
+	LASSERT(cle->ce_magic == &cl_env_init0);
+	return (void *)cle;
 }
 
-static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
+static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
 {
 	struct cl_env *cle = cl_env_hops_obj(hn);
 
@@ -640,9 +641,9 @@ static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
 	return (key == cle->ce_owner);
 }
 
-static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
+static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
 {
-        struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
+	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
 
 	LASSERT(cle->ce_magic == &cl_env_init0);
 }
 
@@ -657,35 +658,35 @@ static cfs_hash_ops_t cl_env_hops = {
 
 static inline struct cl_env *cl_env_fetch(void)
 {
-        struct cl_env *cle;
+	struct cl_env *cle;
 
-        cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
-        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
-        return cle;
+	cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+	return cle;
 }
 
 static inline void cl_env_attach(struct cl_env *cle)
 {
-        if (cle) {
-                int rc;
-
-                LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = (void *) (long) cfs_current()->pid;
-                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
-                                         &cle->ce_node);
-                LASSERT(rc == 0);
-        }
+	if (cle) {
+		int rc;
+
+		LASSERT(cle->ce_owner == NULL);
+		cle->ce_owner = (void
*) (long) current->pid; + rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner, + &cle->ce_node); + LASSERT(rc == 0); + } } static inline void cl_env_do_detach(struct cl_env *cle) { - void *cookie; + void *cookie; - LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid); - cookie = cfs_hash_del(cl_env_hash, cle->ce_owner, - &cle->ce_node); - LASSERT(cookie == cle); - cle->ce_owner = NULL; + LASSERT(cle->ce_owner == (void *) (long) current->pid); + cookie = cfs_hash_del(cl_env_hash, cle->ce_owner, + &cle->ce_node); + LASSERT(cookie == cle); + cle->ce_owner = NULL; } static int cl_env_store_init(void) { @@ -710,30 +711,30 @@ static void cl_env_store_fini(void) { static inline struct cl_env *cl_env_fetch(void) { - struct cl_env *cle; + struct cl_env *cle; - cle = cfs_current()->LL_TASK_CL_ENV; - if (cle && cle->ce_magic != &cl_env_init0) - cle = NULL; - return cle; + cle = current->LL_TASK_CL_ENV; + if (cle && cle->ce_magic != &cl_env_init0) + cle = NULL; + return cle; } static inline void cl_env_attach(struct cl_env *cle) { - if (cle) { - LASSERT(cle->ce_owner == NULL); - cle->ce_owner = cfs_current(); - cle->ce_prev = cfs_current()->LL_TASK_CL_ENV; - cfs_current()->LL_TASK_CL_ENV = cle; - } + if (cle) { + LASSERT(cle->ce_owner == NULL); + cle->ce_owner = current; + cle->ce_prev = current->LL_TASK_CL_ENV; + current->LL_TASK_CL_ENV = cle; + } } static inline void cl_env_do_detach(struct cl_env *cle) { - LASSERT(cle->ce_owner == cfs_current()); - LASSERT(cfs_current()->LL_TASK_CL_ENV == cle); - cfs_current()->LL_TASK_CL_ENV = cle->ce_prev; - cle->ce_owner = NULL; + LASSERT(cle->ce_owner == current); + LASSERT(current->LL_TASK_CL_ENV == cle); + current->LL_TASK_CL_ENV = cle->ce_prev; + cle->ce_owner = NULL; } static int cl_env_store_init(void) { return 0; } @@ -754,42 +755,42 @@ static inline struct cl_env *cl_env_detach(struct cl_env *cle) static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) { - struct lu_env *env; - struct cl_env *cle; - - OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO); - if (cle != NULL) { - int rc; - - CFS_INIT_LIST_HEAD(&cle->ce_linkage); - cle->ce_magic = &cl_env_init0; - env = &cle->ce_lu; - rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags); - if (rc == 0) { - rc = lu_context_init(&cle->ce_ses, - LCT_SESSION | ses_tags); - if (rc == 0) { - lu_context_enter(&cle->ce_ses); - env->le_ses = &cle->ce_ses; - cl_env_init0(cle, debug); - } else - lu_env_fini(env); - } - if (rc != 0) { - OBD_SLAB_FREE_PTR(cle, cl_env_kmem); - env = ERR_PTR(rc); - } else { - CL_ENV_INC(cs_created); - CL_ENV_INC(cs_total); - } - } else - env = ERR_PTR(-ENOMEM); - return env; + struct lu_env *env; + struct cl_env *cle; + + OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS); + if (cle != NULL) { + int rc; + + INIT_LIST_HEAD(&cle->ce_linkage); + cle->ce_magic = &cl_env_init0; + env = &cle->ce_lu; + rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags); + if (rc == 0) { + rc = lu_context_init(&cle->ce_ses, + LCT_SESSION | ses_tags); + if (rc == 0) { + lu_context_enter(&cle->ce_ses); + env->le_ses = &cle->ce_ses; + cl_env_init0(cle, debug); + } else + lu_env_fini(env); + } + if (rc != 0) { + OBD_SLAB_FREE_PTR(cle, cl_env_kmem); + env = ERR_PTR(rc); + } else { + CL_ENV_INC(create); + CL_ENV_INC(total); + } + } else + env = ERR_PTR(-ENOMEM); + return env; } static void cl_env_fini(struct cl_env *cle) { - CL_ENV_DEC(cs_total); + CL_ENV_DEC(total); lu_context_fini(&cle->ce_lu.le_ctx); lu_context_fini(&cle->ce_ses); OBD_SLAB_FREE_PTR(cle, cl_env_kmem); @@ -797,19 +798,19 @@ static 
void cl_env_fini(struct cl_env *cle) static struct lu_env *cl_env_obtain(void *debug) { - struct cl_env *cle; - struct lu_env *env; + struct cl_env *cle; + struct lu_env *env; - ENTRY; - cfs_spin_lock(&cl_envs_guard); - LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs))); - if (cl_envs_cached_nr > 0) { - int rc; + ENTRY; + spin_lock(&cl_envs_guard); + LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs))); + if (cl_envs_cached_nr > 0) { + int rc; - cle = container_of(cl_envs.next, struct cl_env, ce_linkage); - cfs_list_del_init(&cle->ce_linkage); - cl_envs_cached_nr--; - cfs_spin_unlock(&cl_envs_guard); + cle = container_of(cl_envs.next, struct cl_env, ce_linkage); + list_del_init(&cle->ce_linkage); + cl_envs_cached_nr--; + spin_unlock(&cl_envs_guard); env = &cle->ce_lu; rc = lu_env_refill(env); @@ -822,11 +823,11 @@ static struct lu_env *cl_env_obtain(void *debug) env = ERR_PTR(rc); } } else { - cfs_spin_unlock(&cl_envs_guard); - env = cl_env_new(lu_context_tags_default, - lu_session_tags_default, debug); - } - RETURN(env); + spin_unlock(&cl_envs_guard); + env = cl_env_new(lu_context_tags_default, + lu_session_tags_default, debug); + } + RETURN(env); } static inline struct cl_env *cl_env_container(struct lu_env *env) @@ -839,7 +840,7 @@ struct lu_env *cl_env_peek(int *refcheck) struct lu_env *env; struct cl_env *cle; - CL_ENV_INC(cs_lookup); + CL_ENV_INC(lookup); /* check that we don't go far from untrusted pointer */ CLASSERT(offsetof(struct cl_env, ce_magic) == 0); @@ -847,7 +848,7 @@ struct lu_env *cl_env_peek(int *refcheck) env = NULL; cle = cl_env_fetch(); if (cle != NULL) { - CL_ENV_INC(cs_hit); + CL_ENV_INC(hit); env = &cle->ce_lu; *refcheck = ++cle->ce_ref; } @@ -925,23 +926,23 @@ static void cl_env_exit(struct cl_env *cle) */ unsigned cl_env_cache_purge(unsigned nr) { - struct cl_env *cle; - - ENTRY; - cfs_spin_lock(&cl_envs_guard); - for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) { - cle = container_of(cl_envs.next, struct cl_env, ce_linkage); - cfs_list_del_init(&cle->ce_linkage); - LASSERT(cl_envs_cached_nr > 0); - cl_envs_cached_nr--; - cfs_spin_unlock(&cl_envs_guard); - - cl_env_fini(cle); - cfs_spin_lock(&cl_envs_guard); - } - LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs))); - cfs_spin_unlock(&cl_envs_guard); - RETURN(nr); + struct cl_env *cle; + + ENTRY; + spin_lock(&cl_envs_guard); + for (; !list_empty(&cl_envs) && nr > 0; --nr) { + cle = container_of(cl_envs.next, struct cl_env, ce_linkage); + list_del_init(&cle->ce_linkage); + LASSERT(cl_envs_cached_nr > 0); + cl_envs_cached_nr--; + spin_unlock(&cl_envs_guard); + + cl_env_fini(cle); + spin_lock(&cl_envs_guard); + } + LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs))); + spin_unlock(&cl_envs_guard); + RETURN(nr); } EXPORT_SYMBOL(cl_env_cache_purge); @@ -963,7 +964,7 @@ void cl_env_put(struct lu_env *env, int *refcheck) CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); if (--cle->ce_ref == 0) { - CL_ENV_DEC(cs_busy); + CL_ENV_DEC(busy); cl_env_detach(cle); cle->ce_debug = NULL; cl_env_exit(cle); @@ -976,13 +977,13 @@ void cl_env_put(struct lu_env *env, int *refcheck) if (cl_envs_cached_nr < cl_envs_cached_max && (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD && (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) { - cfs_spin_lock(&cl_envs_guard); - cfs_list_add(&cle->ce_linkage, &cl_envs); - cl_envs_cached_nr++; - cfs_spin_unlock(&cl_envs_guard); - } else - cl_env_fini(cle); - } + spin_lock(&cl_envs_guard); + list_add(&cle->ce_linkage, &cl_envs); + cl_envs_cached_nr++; + 
spin_unlock(&cl_envs_guard);
+	} else
+		cl_env_fini(cle);
+	}
 }
 EXPORT_SYMBOL(cl_env_put);
@@ -1108,6 +1109,103 @@ void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
 }
 EXPORT_SYMBOL(cl_lvb2attr);
 
+static struct cl_env cl_env_percpu[NR_CPUS];
+
+static int cl_env_percpu_init(void)
+{
+	struct cl_env *cle;
+	int tags = LCT_REMEMBER | LCT_NOREF;
+	int i, j;
+	int rc = 0;
+
+	for_each_possible_cpu(i) {
+		struct lu_env *env;
+
+		cle = &cl_env_percpu[i];
+		env = &cle->ce_lu;
+
+		INIT_LIST_HEAD(&cle->ce_linkage);
+		cle->ce_magic = &cl_env_init0;
+		rc = lu_env_init(env, LCT_CL_THREAD | tags);
+		if (rc == 0) {
+			rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
+			if (rc == 0) {
+				lu_context_enter(&cle->ce_ses);
+				env->le_ses = &cle->ce_ses;
+			} else {
+				lu_env_fini(env);
+			}
+		}
+		if (rc != 0)
+			break;
+	}
+	if (rc != 0) {
+		/* Indices 0 to i (excluding i) were correctly initialized,
+		 * thus we must uninitialize up to i, the rest are undefined. */
+		for (j = 0; j < i; j++) {
+			cle = &cl_env_percpu[j];
+			lu_context_exit(&cle->ce_ses);
+			lu_context_fini(&cle->ce_ses);
+			lu_env_fini(&cle->ce_lu);
+		}
+	}
+
+	return rc;
+}
+
+static void cl_env_percpu_fini(void)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct cl_env *cle = &cl_env_percpu[i];
+
+		lu_context_exit(&cle->ce_ses);
+		lu_context_fini(&cle->ce_ses);
+		lu_env_fini(&cle->ce_lu);
+	}
+}
+
+static void cl_env_percpu_refill(void)
+{
+	int i;
+
+	for_each_possible_cpu(i)
+		lu_env_refill(&cl_env_percpu[i].ce_lu);
+}
+
+void cl_env_percpu_put(struct lu_env *env)
+{
+	struct cl_env *cle;
+	int cpu;
+
+	cpu = smp_processor_id();
+	cle = cl_env_container(env);
+	LASSERT(cle == &cl_env_percpu[cpu]);
+
+	cle->ce_ref--;
+	LASSERT(cle->ce_ref == 0);
+
+	CL_ENV_DEC(busy);
+	cl_env_detach(cle);
+	cle->ce_debug = NULL;
+
+	put_cpu();
+}
+EXPORT_SYMBOL(cl_env_percpu_put);
+
+struct lu_env *cl_env_percpu_get(void)
+{
+	struct cl_env *cle;
+
+	cle = &cl_env_percpu[get_cpu()];
+	cl_env_init0(cle, __builtin_return_address(0));
+
+	cl_env_attach(cle);
+	return &cle->ce_lu;
+}
+EXPORT_SYMBOL(cl_env_percpu_get);
+
 /*****************************************************************************
  *
  * Temporary prototype thing: mirror obd-devices into cl devices.
@@ -1240,11 +1338,13 @@ static struct lu_kmem_descr cl_object_caches[] = {
  */
 int cl_global_init(void)
 {
-	int result;
+	int result;
 
-	result = cl_env_store_init();
-	if (result)
-		return result;
+	INIT_LIST_HEAD(&cl_envs);
+
+	result = cl_env_store_init();
+	if (result)
+		return result;
 
 	result = lu_kmem_init(cl_object_caches);
 	if (result)
@@ -1263,6 +1363,11 @@ int cl_global_init(void)
 	if (result)
 		goto out_lock;
 
+	result = cl_env_percpu_init();
+	if (result)
+		/* no cl_env_percpu_fini on error */
+		goto out_lock;
+
 	return 0;
 out_lock:
 	cl_lock_fini();
@@ -1280,6 +1385,7 @@ out_store:
  */
 void cl_global_fini(void)
 {
+	cl_env_percpu_fini();
 	cl_lock_fini();
 	cl_page_fini();
 	lu_context_key_degister(&cl_key);
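
For context on the per-CPU environment machinery this patch adds, the sketch
below isolates the kernel idiom behind cl_env_percpu_get()/cl_env_percpu_put():
a statically sized per-CPU slot array, where get_cpu() hands back the current
CPU id with preemption disabled, so the slot cannot be handed to another task
on the same CPU until the matching put_cpu(). This is a minimal illustration,
not Lustre code; struct pcpu_slot, pcpu_slot_get() and pcpu_slot_put() are
made-up names standing in for cl_env and the functions above.

/* Illustrative per-CPU slot pattern; assumes kernel-module context. */
#include <linux/smp.h>		/* get_cpu(), put_cpu(), smp_processor_id() */
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/bug.h>		/* BUG_ON() */

struct pcpu_slot {
	int   ps_ref;		/* plays the role of cl_env::ce_ref */
	void *ps_payload;	/* plays the role of the embedded lu_env */
};

static struct pcpu_slot pcpu_slots[NR_CPUS];

static struct pcpu_slot *pcpu_slot_get(void)
{
	/* get_cpu() disables preemption until the matching put_cpu(),
	 * so the borrowed slot stays private to this task. */
	struct pcpu_slot *slot = &pcpu_slots[get_cpu()];

	BUG_ON(slot->ps_ref != 0);	/* per-CPU envs must not nest */
	slot->ps_ref = 1;
	return slot;
}

static void pcpu_slot_put(struct pcpu_slot *slot)
{
	/* smp_processor_id() is safe here: preemption is still off. */
	BUG_ON(slot != &pcpu_slots[smp_processor_id()]);
	slot->ps_ref = 0;
	put_cpu();			/* re-enable preemption */
}

The trade-off mirrors the patch: a borrowed per-CPU env is cheap (no slab
allocation, no hash lookup), but the caller must not sleep or nest another
get while holding it, since preemption stays disabled in between.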
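The stats code is also converted from snprintf()-into-a-page buffers to the
kernel's seq_file interface. A generic sketch of that idiom follows; it is not
the Lustre code, and demo_stats/demo_stats_show are hypothetical names. The
point is that the ->show callback only calls seq_printf() and the seq_file
core handles buffer sizing and offsets, which is what eliminates all of the
"page + nob, count - nob" arithmetic removed above.

/* Illustrative seq_file ->show callback; assumes kernel-module context. */
#include <linux/seq_file.h>
#include <linux/atomic.h>

enum { DEMO_LOOKUP, DEMO_HIT, DEMO_TOTAL, DEMO_NR };

static atomic_t demo_stats[DEMO_NR];	/* counters, zero-initialized */

static int demo_stats_show(struct seq_file *m, void *data)
{
	static const char *names[DEMO_NR] = { "lookup", "hit", "total" };
	int i;

	for (i = 0; i < DEMO_NR; i++)	/* header row */
		seq_printf(m, "%8s", names[i]);
	seq_printf(m, "\n");
	for (i = 0; i < DEMO_NR; i++)	/* one counter per column */
		seq_printf(m, "%8u", atomic_read(&demo_stats[i]));
	seq_printf(m, "\n");
	return 0;	/* 0 tells seq_file the record is complete */
}

Such a ->show routine is what the reworked cache_stats_print() and
cl_site_stats_print() amount to; in a procfs or debugfs file it would
typically be wired up through single_open().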
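Finally, several functions touched here (cl_object_attr_get(),
cl_object_attr_set(), cl_object_glimpse(), cl_conf_set()) share one layer-walk
convention: iterate the object's layer list, stop at the first layer that
returns non-zero, and fold a positive return (meaning "handled, stop early,
no error") back to 0. A compact sketch of just that convention, with
hypothetical names (struct demo_layer, demo_walk_layers()):

/* Illustrative layer-walking convention, simplified from cl_object. */
#include <linux/list.h>

struct demo_layer {
	struct list_head dl_linkage;		/* like co_lu.lo_linkage */
	int (*dl_op)(struct demo_layer *l);	/* like coo_attr_get; may be NULL */
};

static int demo_walk_layers(struct list_head *layers)
{
	struct demo_layer *l;
	int result = 0;

	list_for_each_entry(l, layers, dl_linkage) {
		if (l->dl_op != NULL) {
			result = l->dl_op(l);
			if (result != 0) {
				if (result > 0)	/* positive: layer handled it */
					result = 0;
				break;
			}
		}
	}
	return result;
}

The setters and cl_object_glimpse() perform the same walk bottom-up by using
list_for_each_entry_reverse() instead, as the hunks above show.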