* Client Lustre Object.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
/*
*
* i_mutex
* PG_locked
- * ->coh_page_guard
* ->coh_lock_guard
* ->coh_attr_guard
* ->ls_guard
static struct kmem_cache *cl_env_kmem;
-/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
/** Lock class of cl_object_header::coh_lock_guard */
static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
*/
int cl_object_header_init(struct cl_object_header *h)
{
- int result;
+ int result;
- ENTRY;
- result = lu_object_header_init(&h->coh_lu);
- if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
+ ENTRY;
+ result = lu_object_header_init(&h->coh_lu);
+ if (result == 0) {
spin_lock_init(&h->coh_lock_guard);
spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- h->coh_pages = 0;
- /* XXX hard coded GFP_* mask. */
- INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
- CFS_INIT_LIST_HEAD(&h->coh_locks);
- h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
- }
- RETURN(result);
+ CFS_INIT_LIST_HEAD(&h->coh_locks);
+ h->coh_page_bufsize = 0;
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_object_header_init);
struct cl_device *cd, const struct lu_fid *fid,
const struct cl_object_conf *c)
{
- cfs_might_sleep();
+ might_sleep();
return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
/**
* Returns the top-object for a given \a o.
*
- * \see cl_page_top(), cl_io_top()
+ * \see cl_io_top()
*/
struct cl_object *cl_object_top(struct cl_object *o)
{
struct lu_object_header *top;
int result;
- LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ assert_spin_locked(cl_object_attr_guard(obj));
ENTRY;
top = obj->co_lu.lo_header;
struct lu_object_header *top;
int result;
- LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ assert_spin_locked(cl_object_attr_guard(obj));
ENTRY;
top = obj->co_lu.lo_header;
EXPORT_SYMBOL(cl_conf_set);
/**
+ * Prunes caches of pages and locks for this object.
+ */
+void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *top;
+ struct cl_object *o;
+ int result;
+ ENTRY;
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ cfs_list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
+ if (o->co_ops->coo_prune != NULL) {
+ result = o->co_ops->coo_prune(env, o);
+ if (result != 0)
+ break;
+ }
+ }
+
+ /* TODO: pruning locks will be moved into layers after cl_lock
+ * simplification is done */
+ cl_locks_prune(env, obj, 1);
+ EXIT;
+}
+EXPORT_SYMBOL(cl_object_prune);
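+
+/*
+ * Illustrative teardown ordering (a sketch, not part of this patch): since
+ * cl_object_kill() below requires that all object pages have already been
+ * deleted, a teardown path prunes first:
+ *
+ *	cl_object_prune(env, obj);
+ *	cl_object_kill(env, obj);
+ */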
+
+/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
struct cl_object_header *hdr;
hdr = cl_object_header(obj);
- LASSERT(hdr->coh_tree.rnode == NULL);
- LASSERT(hdr->coh_pages == 0);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
/*
EXPORT_SYMBOL(cl_object_kill);
/**
- * Prunes caches of pages and locks for this object.
- */
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
-{
- ENTRY;
- cl_pages_prune(env, obj);
- cl_locks_prune(env, obj, 1);
- EXIT;
-}
-EXPORT_SYMBOL(cl_object_prune);
-
-/**
* Check if the object has locks.
*/
int cl_object_has_locks(struct cl_object *obj)
cs->cs_name = name;
for (i = 0; i < CS_NR; i++)
- cfs_atomic_set(&cs->cs_stats[i], 0);
+ atomic_set(&cs->cs_stats[i], 0);
}
-int cache_stats_print(const struct cache_stats *cs,
- char *page, int count, int h)
+int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h)
{
- int nob = 0;
int i;
+
/*
* lookup hit total cached create
* env: ...... ...... ...... ...... ......
if (h) {
const char *names[CS_NR] = CS_NAMES;
- nob += snprintf(page + nob, count - nob, "%6s", " ");
+ seq_printf(m, "%6s", " ");
for (i = 0; i < CS_NR; i++)
- nob += snprintf(page + nob, count - nob,
- "%8s", names[i]);
- nob += snprintf(page + nob, count - nob, "\n");
+ seq_printf(m, "%8s", names[i]);
+ seq_printf(m, "\n");
}
- nob += snprintf(page + nob, count - nob, "%5.5s:", cs->cs_name);
+ seq_printf(m, "%5.5s:", cs->cs_name);
for (i = 0; i < CS_NR; i++)
- nob += snprintf(page + nob, count - nob, "%8u",
- cfs_atomic_read(&cs->cs_stats[i]));
- return nob;
+ seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
+ return 0;
}
+static void cl_env_percpu_refill(void);
+
/**
* Initialize client site.
*
cache_stats_init(&s->cs_pages, "pages");
cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
- cfs_atomic_set(&s->cs_pages_state[0], 0);
+ atomic_set(&s->cs_pages_state[0], 0);
for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- cfs_atomic_set(&s->cs_locks_state[i], 0);
- }
- return result;
+ atomic_set(&s->cs_locks_state[i], 0);
+ cl_env_percpu_refill();
+ }
+ return result;
}
EXPORT_SYMBOL(cl_site_init);
static struct cache_stats cl_env_stats = {
.cs_name = "envs",
- .cs_stats = { CFS_ATOMIC_INIT(0), }
+ .cs_stats = { ATOMIC_INIT(0), }
};
/**
* Outputs client site statistical counters into a buffer. Suitable for
* ll_rd_*()-style functions.
*/
-int cl_site_stats_print(const struct cl_site *site, char *page, int count)
+int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
- int nob;
- int i;
- static const char *pstate[] = {
- [CPS_CACHED] = "c",
- [CPS_OWNED] = "o",
- [CPS_PAGEOUT] = "w",
- [CPS_PAGEIN] = "r",
- [CPS_FREEING] = "f"
- };
- static const char *lstate[] = {
- [CLS_NEW] = "n",
- [CLS_QUEUING] = "q",
- [CLS_ENQUEUED] = "e",
- [CLS_HELD] = "h",
- [CLS_INTRANSIT] = "t",
- [CLS_CACHED] = "c",
- [CLS_FREEING] = "f"
- };
+ static const char *pstate[] = {
+ [CPS_CACHED] = "c",
+ [CPS_OWNED] = "o",
+ [CPS_PAGEOUT] = "w",
+ [CPS_PAGEIN] = "r",
+ [CPS_FREEING] = "f"
+ };
+ static const char *lstate[] = {
+ [CLS_NEW] = "n",
+ [CLS_QUEUING] = "q",
+ [CLS_ENQUEUED] = "e",
+ [CLS_HELD] = "h",
+ [CLS_INTRANSIT] = "t",
+ [CLS_CACHED] = "c",
+ [CLS_FREEING] = "f"
+ };
+ int i;
+
/*
lookup hit total busy create
pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
env: ...... ...... ...... ...... ......
*/
- nob = lu_site_stats_print(&site->cs_lu, page, count);
- nob += cache_stats_print(&site->cs_pages, page + nob, count - nob, 1);
- nob += snprintf(page + nob, count - nob, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
- nob += snprintf(page + nob, count - nob, "%s: %u ",
- pstate[i],
- cfs_atomic_read(&site->cs_pages_state[i]));
- nob += snprintf(page + nob, count - nob, "]\n");
- nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
- nob += snprintf(page + nob, count - nob, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
- nob += snprintf(page + nob, count - nob, "%s: %u ",
- lstate[i],
- cfs_atomic_read(&site->cs_locks_state[i]));
- nob += snprintf(page + nob, count - nob, "]\n");
- nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
- nob += snprintf(page + nob, count - nob, "\n");
- return nob;
+ lu_site_stats_seq_print(&site->cs_lu, m);
+ cache_stats_print(&site->cs_pages, m, 1);
+ seq_printf(m, " [");
+ for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
+ seq_printf(m, "%s: %u ", pstate[i],
+ atomic_read(&site->cs_pages_state[i]));
+ seq_printf(m, "]\n");
+ cache_stats_print(&site->cs_locks, m, 0);
+ seq_printf(m, " [");
+ for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
+ seq_printf(m, "%s: %u ", lstate[i],
+ atomic_read(&site->cs_locks_state[i]));
+ seq_printf(m, "]\n");
+ cache_stats_print(&cl_env_stats, m, 0);
+ seq_printf(m, "\n");
+ return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);
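
/*
 * Usage sketch (illustrative; the helper name is hypothetical): with the
 * seq_file conversion, a procfs show callback wired up via single_open()
 * can emit the stats directly:
 *
 *	static int cl_site_stats_seq_show(struct seq_file *m, void *v)
 *	{
 *		return cl_site_stats_print(m->private, m);
 *	}
 */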
* bz20044, bz22683.
*/
+static CFS_LIST_HEAD(cl_envs);
+static unsigned cl_envs_cached_nr = 0;
+static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
+ * for now. */
+static DEFINE_SPINLOCK(cl_envs_guard);
+
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
*/
cfs_hlist_node_t ce_node;
#endif
- /**
- * Owner for the current cl_env.
- *
- * If LL_TASK_CL_ENV is defined, this point to the owning cfs_current(),
- * only for debugging purpose ;
- * Otherwise hash is used, and this is the key for cfs_hash.
- * Now current thread pid is stored. Note using thread pointer would
- * lead to unbalanced hash because of its specific allocation locality
- * and could be varied for different platforms and OSes, even different
- * OS versions.
- */
- void *ce_owner;
+ /**
+ * Owner for the current cl_env.
+ *
+	 * If LL_TASK_CL_ENV is defined, this points to the owning current
+	 * task, and is used only for debugging;
+	 * otherwise a hash is used, and this is the key for cfs_hash:
+	 * the current thread pid is stored. Note that using the thread
+	 * pointer instead would lead to an unbalanced hash because of its
+	 * specific allocation locality, which could also vary across
+	 * platforms, OSes, and even OS versions.
+ */
+ void *ce_owner;
/*
* Linkage into global list of all client environments. Used for
};
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
-#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
+#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
#define CL_ENV_DEC(counter) do { \
- LASSERT(cfs_atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
- cfs_atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
+ LASSERT(atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
+ atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
} while (0)
#else
#define CL_ENV_INC(counter)
static inline struct cl_env *cl_env_fetch(void)
{
- struct cl_env *cle;
+ struct cl_env *cle;
- cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
- LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
- return cle;
+ cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+ LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+ return cle;
}
static inline void cl_env_attach(struct cl_env *cle)
{
- if (cle) {
- int rc;
-
- LASSERT(cle->ce_owner == NULL);
- cle->ce_owner = (void *) (long) cfs_current()->pid;
- rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(rc == 0);
- }
+ if (cle) {
+ int rc;
+
+ LASSERT(cle->ce_owner == NULL);
+ cle->ce_owner = (void *) (long) current->pid;
+ rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(rc == 0);
+ }
}
static inline void cl_env_do_detach(struct cl_env *cle)
{
- void *cookie;
+ void *cookie;
- LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
- cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(cookie == cle);
- cle->ce_owner = NULL;
+ LASSERT(cle->ce_owner == (void *) (long) current->pid);
+ cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(cookie == cle);
+ cle->ce_owner = NULL;
}
static int cl_env_store_init(void) {
static inline struct cl_env *cl_env_fetch(void)
{
- struct cl_env *cle;
+ struct cl_env *cle;
- cle = cfs_current()->LL_TASK_CL_ENV;
- if (cle && cle->ce_magic != &cl_env_init0)
- cle = NULL;
- return cle;
+ cle = current->LL_TASK_CL_ENV;
+ if (cle && cle->ce_magic != &cl_env_init0)
+ cle = NULL;
+ return cle;
}
static inline void cl_env_attach(struct cl_env *cle)
{
- if (cle) {
- LASSERT(cle->ce_owner == NULL);
- cle->ce_owner = cfs_current();
- cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
- cfs_current()->LL_TASK_CL_ENV = cle;
- }
+ if (cle) {
+ LASSERT(cle->ce_owner == NULL);
+ cle->ce_owner = current;
+ cle->ce_prev = current->LL_TASK_CL_ENV;
+ current->LL_TASK_CL_ENV = cle;
+ }
}
static inline void cl_env_do_detach(struct cl_env *cle)
{
- LASSERT(cle->ce_owner == cfs_current());
- LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
- cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
- cle->ce_owner = NULL;
+ LASSERT(cle->ce_owner == current);
+ LASSERT(current->LL_TASK_CL_ENV == cle);
+ current->LL_TASK_CL_ENV = cle->ce_prev;
+ cle->ce_owner = NULL;
}
static int cl_env_store_init(void) { return 0; }
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
- struct lu_env *env;
- struct cl_env *cle;
-
- OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO);
- if (cle != NULL) {
- int rc;
-
- CFS_INIT_LIST_HEAD(&cle->ce_linkage);
- cle->ce_magic = &cl_env_init0;
- env = &cle->ce_lu;
- rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
- if (rc == 0) {
- rc = lu_context_init(&cle->ce_ses,
- LCT_SESSION | ses_tags);
- if (rc == 0) {
- lu_context_enter(&cle->ce_ses);
- env->le_ses = &cle->ce_ses;
- cl_env_init0(cle, debug);
- } else
- lu_env_fini(env);
- }
- if (rc != 0) {
- OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
- env = ERR_PTR(rc);
- } else {
- CL_ENV_INC(create);
- CL_ENV_INC(total);
- }
- } else
- env = ERR_PTR(-ENOMEM);
- return env;
+ struct lu_env *env;
+ struct cl_env *cle;
+
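+	/* GFP_NOFS: avoid recursing into the filesystem from this
+	 * allocation. */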
+ OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
+ if (cle != NULL) {
+ int rc;
+
+ CFS_INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ env = &cle->ce_lu;
+ rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses,
+ LCT_SESSION | ses_tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ cl_env_init0(cle, debug);
+ } else
+ lu_env_fini(env);
+ }
+ if (rc != 0) {
+ OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
+ env = ERR_PTR(rc);
+ } else {
+ CL_ENV_INC(create);
+ CL_ENV_INC(total);
+ }
+ } else
+ env = ERR_PTR(-ENOMEM);
+ return env;
}
static void cl_env_fini(struct cl_env *cle)
OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
}
+static struct lu_env *cl_env_obtain(void *debug)
+{
+ struct cl_env *cle;
+ struct lu_env *env;
+
+ ENTRY;
+ spin_lock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ if (cl_envs_cached_nr > 0) {
+ int rc;
+
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ env = &cle->ce_lu;
+ rc = lu_env_refill(env);
+ if (rc == 0) {
+ cl_env_init0(cle, debug);
+ lu_context_enter(&env->le_ctx);
+ lu_context_enter(&cle->ce_ses);
+ } else {
+ cl_env_fini(cle);
+ env = ERR_PTR(rc);
+ }
+ } else {
+ spin_unlock(&cl_envs_guard);
+ env = cl_env_new(lu_context_tags_default,
+ lu_session_tags_default, debug);
+ }
+ RETURN(env);
+}
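+
+/*
+ * Note: lu_env_refill() above updates a cached environment with values for
+ * any lu_context keys registered after the env was parked in the cache, so
+ * a recycled env behaves like a freshly allocated one.
+ */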
+
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
return container_of(env, struct cl_env, ce_lu);
* Returns lu_env: if there already is an environment associated with the
* current thread, it is returned, otherwise, new environment is allocated.
*
+ * Allocations are amortized through the global cache of environments.
+ *
* \param refcheck pointer to a counter used to detect environment leaks. In
* the usual case cl_env_get() and cl_env_put() are called in the same lexical
* scope and pointer to the same integer is passed as \a refcheck. This is
env = cl_env_peek(refcheck);
if (env == NULL) {
- env = cl_env_new(lu_context_tags_default,
- lu_session_tags_default,
- __builtin_return_address(0));
-
+ env = cl_env_obtain(__builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
}
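
/*
 * Typical caller pattern (a sketch of the refcheck contract described
 * above), pairing cl_env_get() and cl_env_put() in one lexical scope:
 *
 *	int refcheck;
 *	struct lu_env *env = cl_env_get(&refcheck);
 *
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	...
 *	cl_env_put(env, &refcheck);
 */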
/**
+ * Finalizes and frees a given number of cached environments. This is done to
+ * (1) free some memory (not currently hooked into VM), or (2) release
+ * references to modules.
+ */
+unsigned cl_env_cache_purge(unsigned nr)
+{
+ struct cl_env *cle;
+
+ ENTRY;
+ spin_lock(&cl_envs_guard);
+ for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs_cached_nr > 0);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ cl_env_fini(cle);
+ spin_lock(&cl_envs_guard);
+ }
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ spin_unlock(&cl_envs_guard);
+ RETURN(nr);
+}
+EXPORT_SYMBOL(cl_env_cache_purge);
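+
+/*
+ * For example (illustrative), a shutdown or memory-pressure path could drain
+ * the whole cache with cl_env_cache_purge(~0); the return value is the part
+ * of the requested budget that was left unused.
+ */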
+
+/**
* Release an environment.
*
* Decrement \a env reference counter. When counter drops to 0, nothing in
cl_env_detach(cle);
cle->ce_debug = NULL;
cl_env_exit(cle);
- cl_env_fini(cle);
+		/*
+		 * Don't bother to take a lock here: cl_envs_cached_nr is
+		 * only a hint, and a racy read merely lets the cache
+		 * briefly over- or under-shoot cl_envs_cached_max.
+		 *
+		 * Return the environment to the cache only when it was
+		 * allocated with the standard tags.
+		 */
+ if (cl_envs_cached_nr < cl_envs_cached_max &&
+ (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
+ (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
+ spin_lock(&cl_envs_guard);
+ cfs_list_add(&cle->ce_linkage, &cl_envs);
+ cl_envs_cached_nr++;
+ spin_unlock(&cl_envs_guard);
+ } else
+ cl_env_fini(cle);
}
}
EXPORT_SYMBOL(cl_env_put);
}
EXPORT_SYMBOL(cl_lvb2attr);
+static struct cl_env cl_env_percpu[NR_CPUS];
+
+static int cl_env_percpu_init(void)
+{
+ struct cl_env *cle;
+ int tags = LCT_REMEMBER | LCT_NOREF;
+ int i, j;
+ int rc = 0;
+
+ for_each_possible_cpu(i) {
+ struct lu_env *env;
+
+ cle = &cl_env_percpu[i];
+ env = &cle->ce_lu;
+
+ CFS_INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ rc = lu_env_init(env, LCT_CL_THREAD | tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ } else {
+ lu_env_fini(env);
+ }
+ }
+ if (rc != 0)
+ break;
+ }
+ if (rc != 0) {
+		/* Indices 0 to i-1 were initialized successfully, so
+		 * uninitialize exactly those; the rest were never set up. */
+		for (j = 0; j < i; j++) {
+			cle = &cl_env_percpu[j];
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+ }
+
+ return rc;
+}
+
+static void cl_env_percpu_fini(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cl_env *cle = &cl_env_percpu[i];
+
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+}
+
+static void cl_env_percpu_refill(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ lu_env_refill(&cl_env_percpu[i].ce_lu);
+}
+
+void cl_env_percpu_put(struct lu_env *env)
+{
+ struct cl_env *cle;
+ int cpu;
+
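+	/* Preemption is still disabled by the get_cpu() in
+	 * cl_env_percpu_get(), so the cpu id is stable until the matching
+	 * put_cpu() below. */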
+ cpu = smp_processor_id();
+ cle = cl_env_container(env);
+ LASSERT(cle == &cl_env_percpu[cpu]);
+
+ cle->ce_ref--;
+ LASSERT(cle->ce_ref == 0);
+
+ CL_ENV_DEC(busy);
+ cl_env_detach(cle);
+ cle->ce_debug = NULL;
+
+ put_cpu();
+}
+EXPORT_SYMBOL(cl_env_percpu_put);
+
+struct lu_env *cl_env_percpu_get(void)
+{
+ struct cl_env *cle;
+
+ cle = &cl_env_percpu[get_cpu()];
+ cl_env_init0(cle, __builtin_return_address(0));
+
+ cl_env_attach(cle);
+ return &cle->ce_lu;
+}
+EXPORT_SYMBOL(cl_env_percpu_get);
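+
+/*
+ * Usage sketch (illustrative): the per-cpu env is borrowed with preemption
+ * disabled by get_cpu(), so the code between get and put must not sleep:
+ *
+ *	struct lu_env *env = cl_env_percpu_get();
+ *
+ *	... short, non-blocking use of env ...
+ *	cl_env_percpu_put(env);
+ */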
+
/*****************************************************************************
*
* Temporary prototype thing: mirror obd-devices into cl devices.
if (result)
goto out_lock;
+ result = cl_env_percpu_init();
+ if (result)
+		/* cl_env_percpu_init() cleans up after itself on failure,
+		 * so no cl_env_percpu_fini() is needed here. */
+ goto out_lock;
+
return 0;
out_lock:
cl_lock_fini();
*/
void cl_global_fini(void)
{
+ cl_env_percpu_fini();
cl_lock_fini();
cl_page_fini();
lu_context_key_degister(&cl_key);