-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#include <libcfs/libcfs.h>
/* class_put_type() */
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
+#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
/* lu_time_global_{init,fini}() */
#include <lu_time.h>
static cfs_mem_cache_t *cl_env_kmem;
/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
+static cfs_lock_class_key_t cl_page_guard_class;
/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
+static cfs_lock_class_key_t cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
-static struct lock_class_key cl_attr_guard_class;
+static cfs_lock_class_key_t cl_attr_guard_class;
+extern __u32 lu_context_tags_default;
+extern __u32 lu_session_tags_default;
/**
* Initialize cl_object_header.
*/
ENTRY;
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
- spin_lock_init(&h->coh_lock_guard);
- spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_attr_guard, &cl_page_guard_class);
- lockdep_set_class(&h->coh_attr_guard, &cl_lock_guard_class);
- lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
+ cfs_spin_lock_init(&h->coh_page_guard);
+ cfs_spin_lock_init(&h->coh_lock_guard);
+ cfs_spin_lock_init(&h->coh_attr_guard);
+ cfs_lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
+ cfs_lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
+ cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
h->coh_pages = 0;
/* XXX hard coded GFP_* mask. */
INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
*/
void cl_object_header_fini(struct cl_object_header *h)
{
- LASSERT(list_empty(&h->coh_locks));
+ LASSERT(cfs_list_empty(&h->coh_locks));
lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);
struct cl_device *cd, const struct lu_fid *fid,
const struct cl_object_conf *c)
{
- might_sleep();
+ cfs_might_sleep();
return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
*
* \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
*/
-static spinlock_t *cl_object_attr_guard(struct cl_object *o)
+static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}
*/
void cl_object_attr_lock(struct cl_object *o)
{
- spin_lock(cl_object_attr_guard(o));
+ cfs_spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);
*/
void cl_object_attr_unlock(struct cl_object *o)
{
- spin_unlock(cl_object_attr_guard(o));
+ cfs_spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_attr_get != NULL) {
result = obj->co_ops->coo_attr_get(env, obj, attr);
if (result != 0) {
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
+ co_lu.lo_linkage) {
if (obj->co_ops->coo_attr_set != NULL) {
result = obj->co_ops->coo_attr_set(env, obj, attr, v);
if (result != 0) {
ENTRY;
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
+ co_lu.lo_linkage) {
if (obj->co_ops->coo_glimpse != NULL) {
result = obj->co_ops->coo_glimpse(env, obj, lvb);
if (result != 0)
ENTRY;
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_conf_set != NULL) {
result = obj->co_ops->coo_conf_set(env, obj, conf);
if (result != 0)
LASSERT(hdr->coh_tree.rnode == NULL);
LASSERT(hdr->coh_pages == 0);
- set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+ cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
/*
* Destroy all locks. Object destruction (including cl_inode_fini())
* cannot cancel the locks, because in the case of a local client,
}
EXPORT_SYMBOL(cl_object_prune);
+/**
+ * Check if the object has locks.
+ */
+int cl_object_has_locks(struct cl_object *obj)
+{
+ struct cl_object_header *head = cl_object_header(obj);
+ int has;
+
+ cfs_spin_lock(&head->coh_lock_guard);
+ has = cfs_list_empty(&head->coh_locks);
+ cfs_spin_unlock(&head->coh_lock_guard);
+
+ return (has == 0);
+}
+EXPORT_SYMBOL(cl_object_has_locks);
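+/*
+ * A minimal usage sketch (hypothetical caller). Note that the result is
+ * only a snapshot: locks can be created or destroyed as soon as
+ * coh_lock_guard is dropped.
+ *
+ *     if (cl_object_has_locks(obj))
+ *             CDEBUG(D_INFO, "object still has locks\n");
+ */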
+
void cache_stats_init(struct cache_stats *cs, const char *name)
{
cs->cs_name = name;
- atomic_set(&cs->cs_lookup, 0);
- atomic_set(&cs->cs_hit, 0);
- atomic_set(&cs->cs_total, 0);
- atomic_set(&cs->cs_busy, 0);
+ cfs_atomic_set(&cs->cs_lookup, 0);
+ cfs_atomic_set(&cs->cs_hit, 0);
+ cfs_atomic_set(&cs->cs_total, 0);
+ cfs_atomic_set(&cs->cs_busy, 0);
}
int cache_stats_print(const struct cache_stats *cs,
nob += snprintf(page + nob, count - nob,
"%5.5s: %6u %6u %6u %6u %6u",
cs->cs_name,
- atomic_read(&cs->cs_lookup),
- atomic_read(&cs->cs_hit),
- atomic_read(&cs->cs_total),
- atomic_read(&cs->cs_busy),
- atomic_read(&cs->cs_created));
+ cfs_atomic_read(&cs->cs_lookup),
+ cfs_atomic_read(&cs->cs_hit),
+ cfs_atomic_read(&cs->cs_total),
+ cfs_atomic_read(&cs->cs_busy),
+ cfs_atomic_read(&cs->cs_created));
return nob;
}
cache_stats_init(&s->cs_pages, "pages");
cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
- atomic_set(&s->cs_pages_state[0], 0);
+ cfs_atomic_set(&s->cs_pages_state[i], 0);
for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- atomic_set(&s->cs_locks_state[i], 0);
+ cfs_atomic_set(&s->cs_locks_state[i], 0);
}
return result;
}
static struct cache_stats cl_env_stats = {
.cs_name = "envs",
- .cs_created = ATOMIC_INIT(0),
- .cs_lookup = ATOMIC_INIT(0),
- .cs_hit = ATOMIC_INIT(0),
- .cs_total = ATOMIC_INIT(0),
- .cs_busy = ATOMIC_INIT(0)
+ .cs_created = CFS_ATOMIC_INIT(0),
+ .cs_lookup = CFS_ATOMIC_INIT(0),
+ .cs_hit = CFS_ATOMIC_INIT(0),
+ .cs_total = CFS_ATOMIC_INIT(0),
+ .cs_busy = CFS_ATOMIC_INIT(0)
};
/**
[CLS_QUEUING] = "q",
[CLS_ENQUEUED] = "e",
[CLS_HELD] = "h",
- [CLS_UNLOCKING] = "u",
+ [CLS_INTRANSIT] = "t",
[CLS_CACHED] = "c",
[CLS_FREEING] = "f"
};
for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
nob += snprintf(page + nob, count - nob, "%s: %u ",
pstate[i],
- atomic_read(&site->cs_pages_state[i]));
+ cfs_atomic_read(&site->cs_pages_state[i]));
nob += snprintf(page + nob, count - nob, "]\n");
nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
nob += snprintf(page + nob, count - nob, " [");
for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
nob += snprintf(page + nob, count - nob, "%s: %u ",
lstate[i],
- atomic_read(&site->cs_locks_state[i]));
+ cfs_atomic_read(&site->cs_locks_state[i]));
nob += snprintf(page + nob, count - nob, "]\n");
nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
nob += snprintf(page + nob, count - nob, "\n");
*
*/
-/*
- * TBD: Description.
+/**
+ * The most efficient way is to store the cl_env pointer in task-specific
+ * structures. On Linux it is not easy to use task_struct->journal_info,
+ * because Lustre code may call into other filesystems that have their own
+ * assumptions about journal_info. The following task_struct fields have
+ * been identified as usable for this purpose:
+ * - cl_env: for liblustre.
+ * - tux_info: only on RedHat kernels.
+ * - ...
+ * \note As long as we use task_struct to store the cl_env, we assume that,
+ * once called into Lustre, we never call into other parts of the kernel
+ * that use those task_struct fields without explicitly exiting Lustre
+ * first.
*
- * XXX: this assumes that re-entrant file system calls (e.g., ->writepage())
- * do not modify already existing current->journal_info.
+ * If no space in task_struct is available, a hash table is used instead.
+ * See bz20044, bz22683.
*/
static CFS_LIST_HEAD(cl_envs);
static unsigned cl_envs_cached_nr = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
* for now. */
-static spinlock_t cl_envs_guard = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(cl_envs_guard);
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
struct lu_context ce_ses;
+
+#ifdef LL_TASK_CL_ENV
+ void *ce_prev;
+#else
+ /**
+ * This allows the cl_env to be entered into cl_env_hash, which
+ * implements the current thread -> client environment lookup.
+ */
+ cfs_hlist_node_t ce_node;
+#endif
+ /**
+ * Owner for the current cl_env.
+ *
+ * If LL_TASK_CL_ENV is defined, this points to the owning cfs_current()
+ * and is used for debugging purposes only; otherwise the hash is used,
+ * and this is the key for cfs_hash. The current thread pid is stored:
+ * using the thread pointer instead would lead to an unbalanced hash
+ * because of its specific allocation locality, which can also vary
+ * across platforms, OSes, and even OS versions.
+ */
+ void *ce_owner;
+
/*
* Linkage into global list of all client environments. Used for
* garbage collection.
*/
- struct list_head ce_linkage;
+ cfs_list_t ce_linkage;
/*
*
*/
int ce_ref;
- void *ce_prev;
/*
* Debugging field: address of the caller who made original
* allocation.
*/
void *ce_debug;
- void *ce_owner;
};
-#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.counter)
+#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)
#define CL_ENV_DEC(counter) \
do { \
- LASSERT(atomic_read(&cl_env_stats.counter) > 0); \
- atomic_dec(&cl_env_stats.counter); \
+ LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0); \
+ cfs_atomic_dec(&cl_env_stats.counter); \
} while (0)
static void cl_env_init0(struct cl_env *cle, void *debug)
LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
cle->ce_ref = 1;
- cle->ce_prev = current->journal_info;
cle->ce_debug = debug;
- cle->ce_owner = current;
- current->journal_info = cle;
CL_ENV_INC(cs_busy);
}
-static struct lu_env *cl_env_new(__u32 tags, void *debug)
+
+#ifndef LL_TASK_CL_ENV
+/*
+ * Hash-table based implementation, connecting a cl_env to its owning
+ * thread.
+ */
+
+static cfs_hash_t *cl_env_hash;
+
+static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+ const void *key, unsigned mask)
+{
+#if BITS_PER_LONG == 64
+ return cfs_hash_u64_hash((__u64)key, mask);
+#else
+ return cfs_hash_u32_hash((__u32)key, mask);
+#endif
+}
+
+static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
+{
+ struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
+ LASSERT(cle->ce_magic == &cl_env_init0);
+ return (void *)cle;
+}
+
+static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
+{
+ struct cl_env *cle = cl_env_hops_obj(hn);
+
+ LASSERT(cle->ce_owner != NULL);
+ return (key == cle->ce_owner);
+}
+
+static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
+{
+ struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
+ LASSERT(cle->ce_magic == &cl_env_init0);
+}
+
+static cfs_hash_ops_t cl_env_hops = {
+ .hs_hash = cl_env_hops_hash,
+ .hs_key = cl_env_hops_obj,
+ .hs_keycmp = cl_env_hops_keycmp,
+ .hs_object = cl_env_hops_obj,
+ .hs_get = cl_env_hops_noop,
+ .hs_put_locked = cl_env_hops_noop,
+};
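+/*
+ * Note: hs_get/hs_put_locked above are deliberately no-ops (they only
+ * check ce_magic): a cl_env's lifetime is driven by ce_ref, not by hash
+ * refcounting, so the hash table holds no reference of its own.
+ */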
+
+static inline struct cl_env *cl_env_fetch(void)
+{
+ struct cl_env *cle;
+
+ cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
+ LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+ return cle;
+}
+
+static inline void cl_env_attach(struct cl_env *cle)
+{
+ if (cle) {
+ int rc;
+
+ LASSERT(cle->ce_owner == NULL);
+ cle->ce_owner = (void *) (long) cfs_current()->pid;
+ rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(rc == 0);
+ }
+}
+
+static inline void cl_env_do_detach(struct cl_env *cle)
+{
+ void *cookie;
+
+ LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
+ cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(cookie == cle);
+ cle->ce_owner = NULL;
+}
+
+static int cl_env_store_init(void)
+{
+ cl_env_hash = cfs_hash_create("cl_env",
+ HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
+ HASH_CL_ENV_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &cl_env_hops,
+ CFS_HASH_RW_BKTLOCK);
+ return cl_env_hash != NULL ? 0 : -ENOMEM;
+}
+
+static void cl_env_store_fini(void)
+{
+ cfs_hash_putref(cl_env_hash);
+}
+
+#else /* LL_TASK_CL_ENV */
+/*
+ * Implementation that stores the cl_env directly in the thread
+ * structure.
+ */
+
+static inline struct cl_env *cl_env_fetch(void)
+{
+ struct cl_env *cle;
+
+ cle = cfs_current()->LL_TASK_CL_ENV;
+ if (cle && cle->ce_magic != &cl_env_init0)
+ cle = NULL;
+ return cle;
+}
+
+static inline void cl_env_attach(struct cl_env *cle)
+{
+ if (cle) {
+ LASSERT(cle->ce_owner == NULL);
+ cle->ce_owner = cfs_current();
+ cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
+ cfs_current()->LL_TASK_CL_ENV = cle;
+ }
+}
+
+static inline void cl_env_do_detach(struct cl_env *cle)
+{
+ LASSERT(cle->ce_owner == cfs_current());
+ LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
+ cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
+ cle->ce_owner = NULL;
+}
+
+static int cl_env_store_init(void) { return 0; }
+static void cl_env_store_fini(void) { }
+
+#endif /* LL_TASK_CL_ENV */
+
+static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+{
+ if (cle == NULL)
+ cle = cl_env_fetch();
+
+ if (cle && cle->ce_owner)
+ cl_env_do_detach(cle);
+
+ return cle;
+}
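+/*
+ * A sketch of how the helpers above cooperate; both the hash-based and
+ * the LL_TASK_CL_ENV variants expose the same fetch/attach/detach
+ * interface:
+ *
+ *     cl_env_get():  cle = cl_env_fetch();      /* reuse if attached *\/
+ *                    if (cle == NULL)
+ *                            allocate, then cl_env_attach(cle);
+ *     cl_env_put():  if (--cle->ce_ref == 0)
+ *                            cl_env_detach(cle);
+ */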
+
+static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
struct lu_env *env;
struct cl_env *cle;
- OBD_SLAB_ALLOC_PTR(cle, cl_env_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO);
if (cle != NULL) {
int rc;
CFS_INIT_LIST_HEAD(&cle->ce_linkage);
cle->ce_magic = &cl_env_init0;
env = &cle->ce_lu;
- rc = lu_env_init(env, LCT_CL_THREAD|tags);
+ rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
if (rc == 0) {
- rc = lu_context_init(&cle->ce_ses, LCT_SESSION|tags);
+ rc = lu_context_init(&cle->ce_ses,
+ LCT_SESSION | ses_tags);
if (rc == 0) {
lu_context_enter(&cle->ce_ses);
env->le_ses = &cle->ce_ses;
struct lu_env *env;
ENTRY;
- spin_lock(&cl_envs_guard);
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ cfs_spin_lock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
if (cl_envs_cached_nr > 0) {
int rc;
cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- list_del_init(&cle->ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ cfs_spin_unlock(&cl_envs_guard);
env = &cle->ce_lu;
rc = lu_env_refill(env);
env = ERR_PTR(rc);
}
} else {
- spin_unlock(&cl_envs_guard);
- env = cl_env_new(0, debug);
+ cfs_spin_unlock(&cl_envs_guard);
+ env = cl_env_new(lu_context_tags_default,
+ lu_session_tags_default, debug);
}
RETURN(env);
}
CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
env = NULL;
- cle = current->journal_info;
- if (cle != NULL && cle->ce_magic == &cl_env_init0) {
+ cle = cl_env_fetch();
+ if (cle != NULL) {
CL_ENV_INC(cs_hit);
env = &cle->ce_lu;
*refcheck = ++cle->ce_ref;
}
- CDEBUG(D_OTHER, "%i@%p\n", cle ? cle->ce_ref : 0, cle);
+ CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
return env;
}
EXPORT_SYMBOL(cl_env_peek);
struct cl_env *cle;
cle = cl_env_container(env);
+ cl_env_attach(cle);
*refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
}
return env;
struct lu_env *env;
LASSERT(cl_env_peek(refcheck) == NULL);
- env = cl_env_new(tags, __builtin_return_address(0));
+ env = cl_env_new(tags, tags, __builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
cle = cl_env_container(env);
*refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
return env;
}
static void cl_env_exit(struct cl_env *cle)
{
+ LASSERT(cle->ce_owner == NULL);
lu_context_exit(&cle->ce_lu.le_ctx);
lu_context_exit(&cle->ce_ses);
}
struct cl_env *cle;
ENTRY;
- spin_lock(&cl_envs_guard);
- for (; !list_empty(&cl_envs) && nr > 0; --nr) {
+ cfs_spin_lock(&cl_envs_guard);
+ for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- list_del_init(&cle->ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
LASSERT(cl_envs_cached_nr > 0);
cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ cfs_spin_unlock(&cl_envs_guard);
cl_env_fini(cle);
- spin_lock(&cl_envs_guard);
+ cfs_spin_lock(&cl_envs_guard);
}
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
- spin_unlock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ cfs_spin_unlock(&cl_envs_guard);
RETURN(nr);
}
EXPORT_SYMBOL(cl_env_cache_purge);
LASSERT(cle->ce_ref > 0);
LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
- CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
if (--cle->ce_ref == 0) {
CL_ENV_DEC(cs_busy);
- current->journal_info = cle->ce_prev;
- LASSERT(cle->ce_prev == NULL ||
- cl_env_container(cle->ce_prev)->ce_magic !=
- &cl_env_init0);
+ cl_env_detach(cle);
cle->ce_debug = NULL;
- cle->ce_owner = NULL;
cl_env_exit(cle);
/*
* Don't bother to take a lock here.
if (cl_envs_cached_nr < cl_envs_cached_max &&
(env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
(env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
- spin_lock(&cl_envs_guard);
- list_add(&cle->ce_linkage, &cl_envs);
+ cfs_spin_lock(&cl_envs_guard);
+ cfs_list_add(&cle->ce_linkage, &cl_envs);
cl_envs_cached_nr++;
- spin_unlock(&cl_envs_guard);
+ cfs_spin_unlock(&cl_envs_guard);
} else
cl_env_fini(cle);
}
/**
* Declares a point of re-entrancy.
*
- * In Linux kernel environments are attached to the thread through
- * current->journal_info pointer that is used by other sub-systems also. When
- * lustre code is invoked in the situation where current->journal_info is
- * potentially already set, cl_env_reenter() is called to save
- * current->journal_info value, so that current->journal_info field can be
- * used to store pointer to the environment.
- *
* \see cl_env_reexit()
*/
void *cl_env_reenter(void)
{
- void *cookie;
-
- cookie = current->journal_info;
- current->journal_info = NULL;
- CDEBUG(D_OTHER, "cookie: %p\n", cookie);
- return cookie;
+ return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);
/**
* Exits re-entrancy.
- *
- * This restores old value of current->journal_info that was saved by
- * cl_env_reenter().
*/
void cl_env_reexit(void *cookie)
{
- current->journal_info = cookie;
- CDEBUG(D_OTHER, "cookie: %p\n", cookie);
+ cl_env_detach(NULL);
+ cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);
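+/*
+ * A sketch of the intended reenter/reexit pairing, as used by
+ * cl_env_nested_get() below:
+ *
+ *     cookie = cl_env_reenter();        /* park the current environment *\/
+ *     env = cl_env_get(&refcheck);      /* obtain a nested one *\/
+ *     ...
+ *     cl_env_put(env, &refcheck);
+ *     cl_env_reexit(cookie);            /* restore the parked environment *\/
+ */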
{
struct cl_env *cle = cl_env_container(env);
- LASSERT(current->journal_info == NULL);
LASSERT(cle->ce_ref > 0);
- current->journal_info = cle;
+ cl_env_attach(cle);
cl_env_get(refcheck);
- CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);
{
struct cl_env *cle = cl_env_container(env);
- LASSERT(cle == current->journal_info);
LASSERT(cle->ce_ref > 1);
- CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
+ cl_env_detach(cle);
cl_env_put(env, refcheck);
- current->journal_info = NULL;
}
EXPORT_SYMBOL(cl_env_unplant);
}
}
env = cl_env_get(&nest->cen_refcheck);
- LASSERT(ergo(!IS_ERR(env), !cl_io_is_going(env)));
+ if (IS_ERR(env)) {
+ cl_env_reexit(nest->cen_cookie);
+ return env;
+ }
+
+ LASSERT(!cl_io_is_going(env));
return env;
}
EXPORT_SYMBOL(cl_env_nested_get);
}
EXPORT_SYMBOL(cl_lvb2attr);
-
/*****************************************************************************
*
* Temporary prototype thing: mirror obd-devices into cl devices.
struct cl_thread_info *info;
info = cl0_key_init(ctx, key);
- if (!IS_ERR(info))
- lu_ref_init(&info->clt_locks_locked);
+ if (!IS_ERR(info)) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
+ lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
+ }
return info;
}
struct lu_context_key *key, void *data)
{
struct cl_thread_info *info;
+ int i;
info = data;
- lu_ref_fini(&info->clt_locks_locked);
+ for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
+ lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
cl0_key_fini(ctx, key, data);
}
struct lu_context_key *key, void *data)
{
struct cl_thread_info *info = data;
+ int i;
- LASSERT(info->clt_nr_locks_locked == 0);
- LASSERT(info->clt_nr_held == 0);
- LASSERT(info->clt_nr_used == 0);
- LASSERT(info->clt_nr_locks_acquired == 0);
-
- lu_ref_fini(&info->clt_locks_locked);
- lu_ref_init(&info->clt_locks_locked);
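+ /*
+ * clt_counters[] keeps per-nesting-level lock statistics; re-init
+ * each lu_ref on exit so the context can be re-entered with clean
+ * per-level state.
+ */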
+ for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
+ LASSERT(info->clt_counters[i].ctc_nr_held == 0);
+ LASSERT(info->clt_counters[i].ctc_nr_used == 0);
+ LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
+ LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
+ lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
+ lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
+ }
}
static struct lu_context_key cl_key = {
{
int result;
+ result = cl_env_store_init();
+ if (result)
+ return result;
+
result = lu_kmem_init(cl_object_caches);
- if (result == 0) {
- LU_CONTEXT_KEY_INIT(&cl_key);
- result = lu_context_key_register(&cl_key);
- if (result == 0) {
- result = cl_lock_init();
- if (result == 0)
- result = cl_page_init();
- }
- }
+ if (result)
+ goto out_store;
+
+ LU_CONTEXT_KEY_INIT(&cl_key);
+ result = lu_context_key_register(&cl_key);
+ if (result)
+ goto out_kmem;
+
+ result = cl_lock_init();
+ if (result)
+ goto out_context;
+
+ result = cl_page_init();
+ if (result)
+ goto out_lock;
+
+ return 0;
+out_lock:
+ cl_lock_fini();
+out_context:
+ lu_context_key_degister(&cl_key);
+out_kmem:
+ lu_kmem_fini(cl_object_caches);
+out_store:
+ cl_env_store_fini();
return result;
}
cl_page_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
+ cl_env_store_fini();
}