/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
/* class_put_type() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
#include <cl_object.h>
#include "cl_internal.h"
static struct kmem_cache *cl_env_kmem;

/** Lock class of cl_object_header::coh_lock_guard */
static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;

extern __u32 lu_context_tags_default;
extern __u32 lu_session_tags_default;
/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
	int result;

	result = lu_object_header_init(&h->coh_lu);
	if (result == 0) {
		spin_lock_init(&h->coh_lock_guard);
		spin_lock_init(&h->coh_attr_guard);
		lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
		lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
		CFS_INIT_LIST_HEAD(&h->coh_locks);
		h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
	}
	return result;
}
EXPORT_SYMBOL(cl_object_header_init);
/**
 * Finalize cl_object_header.
 */
void cl_object_header_fini(struct cl_object_header *h)
{
	LASSERT(cfs_list_empty(&h->coh_locks));
	lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);
/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either cached or newly created object. Additional reference on the
 * returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
				 struct cl_device *cd, const struct lu_fid *fid,
				 const struct cl_object_conf *c)
{
	return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
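
/*
 * Usage sketch (illustrative only, not code from this file; the variable
 * names and error handling are assumptions). The reference acquired here
 * is dropped with cl_object_put():
 *
 *	struct cl_object *obj;
 *
 *	obj = cl_object_find(env, cd, fid, conf);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	cl_object_put(env, obj);
 */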
/**
 * Releases a reference on \a o.
 *
 * When last reference is released object is returned to the cache, unless
 * lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
	lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);
/**
 * Acquire an additional reference to the object \a o.
 *
 * This can only be used to acquire _additional_ reference, i.e., caller
 * already has to possess at least one reference to \a o before calling this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
	lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);
/**
 * Returns the top-object for a given \a o.
 *
 * \see cl_page_top(), cl_io_top()
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
	struct cl_object_header *hdr = cl_object_header(o);
	struct cl_object *top;

	while (hdr->coh_parent != NULL)
		hdr = hdr->coh_parent;

	top = lu2cl(lu_object_top(&hdr->coh_lu));
	CDEBUG(D_TRACE, "%p -> %p\n", o, top);
	return top;
}
EXPORT_SYMBOL(cl_object_top);
/**
 * Returns pointer to the lock protecting data-attributes for the given
 * object \a o.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
	return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}
/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing, until lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get(), cl_object_attr_set().
 */
void cl_object_attr_lock(struct cl_object *o)
{
	spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);
/**
 * Releases data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
{
	spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
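
/*
 * A minimal sketch of the intended attribute access pattern (caller-side
 * names like "new_size" are assumptions; CAT_SIZE stands for whatever
 * validness bits the caller wants to update):
 *
 *	cl_object_attr_lock(obj);
 *	rc = cl_object_attr_get(env, obj, attr);
 *	if (rc == 0) {
 *		attr->cat_size = new_size;
 *		rc = cl_object_attr_set(env, obj, attr, CAT_SIZE);
 *	}
 *	cl_object_attr_unlock(obj);
 */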
/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in parts of \a attr that this layer is responsible
 * for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
		       struct cl_attr *attr)
{
	struct lu_object_header *top;
	int result;

	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));

	top = obj->co_lu.lo_header;
	result = 0;
	cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_get != NULL) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_get);
/**
 * Updates data-attributes of an object \a obj.
 *
 * Only attributes, mentioned in a validness bit-mask \a v are
 * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom
 * to top.
 */
int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
		       const struct cl_attr *attr, unsigned v)
{
	struct lu_object_header *top;
	int result;

	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));

	top = obj->co_lu.lo_header;
	result = 0;
	cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
					co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_set != NULL) {
			result = obj->co_ops->coo_attr_set(env, obj, attr, v);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_set);
/**
 * Notifies layers (bottom-to-top) that glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
		      struct ost_lvb *lvb)
{
	struct lu_object_header *top;
	int result;

	top = obj->co_lu.lo_header;
	result = 0;
	cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
					co_lu.lo_linkage) {
		if (obj->co_ops->coo_glimpse != NULL) {
			result = obj->co_ops->coo_glimpse(env, obj, lvb);
			if (result != 0)
				break;
		}
	}
	LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
			 "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
			 "ctime: "LPU64" blocks: "LPU64"\n",
			 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
			 lvb->lvb_ctime, lvb->lvb_blocks);
	return result;
}
EXPORT_SYMBOL(cl_object_glimpse);
/**
 * Updates a configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
		const struct cl_object_conf *conf)
{
	struct lu_object_header *top;
	int result;

	top = obj->co_lu.lo_header;
	result = 0;
	cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_conf_set != NULL) {
			result = obj->co_ops->coo_conf_set(env, obj, conf);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_conf_set);
/**
 * Prunes caches of pages and locks for this object.
 */
void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *top;
	struct cl_object *o;
	int result;

	top = obj->co_lu.lo_header;
	result = 0;
	cfs_list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
		if (o->co_ops->coo_prune != NULL) {
			result = o->co_ops->coo_prune(env, o);
			if (result != 0)
				break;
		}
	}

	/* TODO: pruning locks will be moved into layers after cl_lock
	 * simplification is done */
	cl_locks_prune(env, obj, 1);
}
EXPORT_SYMBOL(cl_object_prune);
/**
 * Helper function removing all object locks, and marking object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
	struct cl_object_header *hdr;

	hdr = cl_object_header(obj);

	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
	/*
	 * Destroy all locks. Object destruction (including cl_inode_fini())
	 * cannot cancel the locks, because in the case of a local client,
	 * where client and server share the same thread running
	 * prune_icache(), this can dead-lock with ldlm_cancel_handler()
	 * waiting on __wait_on_freeing_inode().
	 */
	cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);
/**
 * Check if the object has locks.
 */
int cl_object_has_locks(struct cl_object *obj)
{
	struct cl_object_header *head = cl_object_header(obj);
	int has;

	spin_lock(&head->coh_lock_guard);
	has = cfs_list_empty(&head->coh_locks);
	spin_unlock(&head->coh_lock_guard);

	/* "has" holds list-emptiness, so invert it for the answer. */
	return (has == 0);
}
EXPORT_SYMBOL(cl_object_has_locks);
void cache_stats_init(struct cache_stats *cs, const char *name)
{
	int i;

	cs->cs_name = name;
	for (i = 0; i < CS_NR; i++)
		cfs_atomic_set(&cs->cs_stats[i], 0);
}
int cache_stats_print(const struct cache_stats *cs,
		      char *page, int count, int h)
{
	int nob = 0;
	int i;

	/*
	 *   lookup    hit  total cached create
	 * env: ...... ...... ...... ...... ......
	 */
	if (h) {
		const char *names[CS_NR] = CS_NAMES;

		nob += snprintf(page + nob, count - nob, "%6s", " ");
		for (i = 0; i < CS_NR; i++)
			nob += snprintf(page + nob, count - nob,
					"%8s", names[i]);
		nob += snprintf(page + nob, count - nob, "\n");
	}

	nob += snprintf(page + nob, count - nob, "%5.5s:", cs->cs_name);
	for (i = 0; i < CS_NR; i++)
		nob += snprintf(page + nob, count - nob, "%8u",
				cfs_atomic_read(&cs->cs_stats[i]));
	nob += snprintf(page + nob, count - nob, "\n");

	return nob;
}
/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
	int i;
	int result;

	result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
	if (result == 0) {
		cache_stats_init(&s->cs_pages, "pages");
		cache_stats_init(&s->cs_locks, "locks");
		for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
			cfs_atomic_set(&s->cs_pages_state[i], 0);
		for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
			cfs_atomic_set(&s->cs_locks_state[i], 0);
	}
	return result;
}
EXPORT_SYMBOL(cl_site_init);
/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
	lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);
static struct cache_stats cl_env_stats = {
	.cs_name  = "envs",
	.cs_stats = { CFS_ATOMIC_INIT(0), }
};
/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, char *page, int count)
{
	static const char *pstate[] = {
		[CPS_CACHED]  = "c",
		[CPS_OWNED]   = "o",
		[CPS_PAGEOUT] = "w",
		[CPS_PAGEIN]  = "r",
		[CPS_FREEING] = "f"
	};
	static const char *lstate[] = {
		[CLS_NEW]       = "n",
		[CLS_QUEUING]   = "q",
		[CLS_ENQUEUED]  = "e",
		[CLS_HELD]      = "h",
		[CLS_INTRANSIT] = "t",
		[CLS_CACHED]    = "c",
		[CLS_FREEING]   = "f"
	};
	int nob;
	int i;
	/*
	       lookup    hit  total   busy create
	 pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
	 locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
	   env: ...... ...... ...... ...... ......
	 */
	nob = lu_site_stats_print(&site->cs_lu, page, count);
	nob += cache_stats_print(&site->cs_pages, page + nob, count - nob, 1);
	nob += snprintf(page + nob, count - nob, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
		nob += snprintf(page + nob, count - nob, "%s: %u ",
				pstate[i],
				cfs_atomic_read(&site->cs_pages_state[i]));
	nob += snprintf(page + nob, count - nob, "]\n");
	nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
	nob += snprintf(page + nob, count - nob, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
		nob += snprintf(page + nob, count - nob, "%s: %u ",
				lstate[i],
				cfs_atomic_read(&site->cs_locks_state[i]));
	nob += snprintf(page + nob, count - nob, "]\n");
	nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
	nob += snprintf(page + nob, count - nob, "\n");
	return nob;
}
EXPORT_SYMBOL(cl_site_stats_print);
/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store cl_env pointer in task specific
 * structures. On Linux, it won't be easy to use task_struct->journal_info
 * because Lustre code may call into other file systems, which have certain
 * assumptions about journal_info. Currently the following fields in
 * task_struct have been identified as usable for this purpose:
 *  - cl_env: for liblustre.
 *  - tux_info: only on RedHat kernels.
 *
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into the other part of the kernel
 * which will use those fields in task_struct without explicitly exiting
 * Lustre.
 *
 * If no space in task_struct is available, hash will be used.
 */
static CFS_LIST_HEAD(cl_envs);
static unsigned cl_envs_cached_nr  = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
					   * for now. */
static DEFINE_SPINLOCK(cl_envs_guard);
struct cl_env {
	void             *ce_magic;
	struct lu_env     ce_lu;
	struct lu_context ce_ses;

#ifdef LL_TASK_CL_ENV
	void             *ce_prev;
#else
	/**
	 * This allows cl_env to be entered into cl_env_hash which implements
	 * the current thread -> client environment lookup.
	 */
	cfs_hlist_node_t  ce_node;
#endif
	/**
	 * Owner for the current cl_env.
	 *
	 * If LL_TASK_CL_ENV is defined, this points to the owning current,
	 * only for debugging purposes;
	 * Otherwise hash is used, and this is the key for cfs_hash.
	 * Now current thread pid is stored. Note using thread pointer would
	 * lead to unbalanced hash because of its specific allocation locality
	 * and could be varied for different platforms and OSes, even different
	 * kernel versions.
	 */
	void             *ce_owner;

	/*
	 * Linkage into global list of all client environments. Used for
	 * garbage collection.
	 */
	cfs_list_t        ce_linkage;
	int               ce_ref;
	/*
	 * Debugging field: address of the caller who made original
	 * allocation.
	 */
	void             *ce_debug;
};
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.cs_stats[CS_##counter])

#define CL_ENV_DEC(counter) do { \
	LASSERT(cfs_atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
	cfs_atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
} while (0)
#else
#define CL_ENV_INC(counter)
#define CL_ENV_DEC(counter)
#endif
static void cl_env_init0(struct cl_env *cle, void *debug)
{
	LASSERT(cle->ce_ref == 0);
	LASSERT(cle->ce_magic == &cl_env_init0);
	LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);

	cle->ce_ref = 1;
	cle->ce_debug = debug;
	CL_ENV_INC(busy);
}
#ifndef LL_TASK_CL_ENV
/*
 * The implementation that uses a hash table to connect cl_env and thread.
 */

static cfs_hash_t *cl_env_hash;
static unsigned cl_env_hops_hash(cfs_hash_t *lh,
				 const void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
	return cfs_hash_u64_hash((__u64)key, mask);
#else
	return cfs_hash_u32_hash((__u32)key, mask);
#endif
}
static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
{
	struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
	LASSERT(cle->ce_magic == &cl_env_init0);
	return (void *)cle;
}
static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
{
	struct cl_env *cle = cl_env_hops_obj(hn);

	LASSERT(cle->ce_owner != NULL);
	return (key == cle->ce_owner);
}
static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
{
	struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
	LASSERT(cle->ce_magic == &cl_env_init0);
}
static cfs_hash_ops_t cl_env_hops = {
	.hs_hash       = cl_env_hops_hash,
	.hs_key        = cl_env_hops_obj,
	.hs_keycmp     = cl_env_hops_keycmp,
	.hs_object     = cl_env_hops_obj,
	.hs_get        = cl_env_hops_noop,
	.hs_put_locked = cl_env_hops_noop,
};
static inline struct cl_env *cl_env_fetch(void)
{
	struct cl_env *cle;

	cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
	return cle;
}
static inline void cl_env_attach(struct cl_env *cle)
{
	if (cle) {
		int rc;

		LASSERT(cle->ce_owner == NULL);
		cle->ce_owner = (void *) (long) current->pid;
		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
					 &cle->ce_node);
		LASSERT(rc == 0);
	}
}
static inline void cl_env_do_detach(struct cl_env *cle)
{
	void *cookie;

	LASSERT(cle->ce_owner == (void *) (long) current->pid);
	cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
			      &cle->ce_node);
	LASSERT(cookie == cle);
	cle->ce_owner = NULL;
}
static int cl_env_store_init(void) {
	cl_env_hash = cfs_hash_create("cl_env",
				      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
				      HASH_CL_ENV_BKT_BITS, 0,
				      CFS_HASH_MIN_THETA,
				      CFS_HASH_MAX_THETA,
				      &cl_env_hops,
				      CFS_HASH_RW_BKTLOCK);
	return cl_env_hash != NULL ? 0 : -ENOMEM;
}
static void cl_env_store_fini(void) {
	cfs_hash_putref(cl_env_hash);
}
#else /* LL_TASK_CL_ENV */
/*
 * The implementation that stores cl_env directly in the thread structure.
 */

static inline struct cl_env *cl_env_fetch(void)
{
	struct cl_env *cle;

	cle = current->LL_TASK_CL_ENV;
	if (cle && cle->ce_magic != &cl_env_init0)
		cle = NULL;
	return cle;
}
static inline void cl_env_attach(struct cl_env *cle)
{
	if (cle) {
		LASSERT(cle->ce_owner == NULL);
		cle->ce_owner = current;
		cle->ce_prev = current->LL_TASK_CL_ENV;
		current->LL_TASK_CL_ENV = cle;
	}
}
static inline void cl_env_do_detach(struct cl_env *cle)
{
	LASSERT(cle->ce_owner == current);
	LASSERT(current->LL_TASK_CL_ENV == cle);
	current->LL_TASK_CL_ENV = cle->ce_prev;
	cle->ce_owner = NULL;
}
static int cl_env_store_init(void) { return 0; }
static void cl_env_store_fini(void) { }

#endif /* LL_TASK_CL_ENV */
static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
	if (cle == NULL)
		cle = cl_env_fetch();

	if (cle && cle->ce_owner)
		cl_env_do_detach(cle);

	return cle;
}
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
	struct lu_env *env;
	struct cl_env *cle;

	OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO);
	if (cle != NULL) {
		int rc;

		CFS_INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		env = &cle->ce_lu;
		rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses,
					     LCT_SESSION | ses_tags);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
				cl_env_init0(cle, debug);
			} else
				lu_env_fini(env);
		}
		if (rc != 0) {
			OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
			env = ERR_PTR(rc);
		} else {
			CL_ENV_INC(create);
			CL_ENV_INC(total);
		}
	} else
		env = ERR_PTR(-ENOMEM);
	return env;
}
static void cl_env_fini(struct cl_env *cle)
{
	CL_ENV_DEC(total);
	lu_context_fini(&cle->ce_lu.le_ctx);
	lu_context_fini(&cle->ce_ses);
	OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
}
static struct lu_env *cl_env_obtain(void *debug)
{
	struct cl_env *cle;
	struct lu_env *env;

	spin_lock(&cl_envs_guard);
	LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
	if (cl_envs_cached_nr > 0) {
		int rc;

		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
		cfs_list_del_init(&cle->ce_linkage);
		cl_envs_cached_nr--;
		spin_unlock(&cl_envs_guard);

		env = &cle->ce_lu;
		rc = lu_env_refill(env);
		if (rc == 0) {
			cl_env_init0(cle, debug);
			lu_context_enter(&env->le_ctx);
			lu_context_enter(&cle->ce_ses);
		} else {
			cl_env_fini(cle);
			env = ERR_PTR(rc);
		}
	} else {
		spin_unlock(&cl_envs_guard);
		env = cl_env_new(lu_context_tags_default,
				 lu_session_tags_default, debug);
	}
	return env;
}
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
	return container_of(env, struct cl_env, ce_lu);
}
struct lu_env *cl_env_peek(int *refcheck)
{
	struct lu_env *env;
	struct cl_env *cle;

	CL_ENV_INC(lookup);

	/* check that we don't go far from untrusted pointer */
	CLASSERT(offsetof(struct cl_env, ce_magic) == 0);

	env = NULL;
	cle = cl_env_fetch();
	if (cle != NULL) {
		CL_ENV_INC(hit);
		env = &cle->ce_lu;
		*refcheck = ++cle->ce_ref;
	}
	CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
	return env;
}
EXPORT_SYMBOL(cl_env_peek);
/**
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned, otherwise, new environment is allocated.
 *
 * Allocations are amortized through the global cache of environments.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 *
 * \see cl_env_peek()
 */
struct lu_env *cl_env_get(int *refcheck)
{
	struct lu_env *env;

	env = cl_env_peek(refcheck);
	if (env == NULL) {
		env = cl_env_obtain(__builtin_return_address(0));
		if (!IS_ERR(env)) {
			struct cl_env *cle;

			cle = cl_env_container(env);
			cl_env_attach(cle);
			*refcheck = cle->ce_ref;
			CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
		}
	}
	return env;
}
EXPORT_SYMBOL(cl_env_get);
/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
	struct lu_env *env;

	LASSERT(cl_env_peek(refcheck) == NULL);
	env = cl_env_new(tags, tags, __builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_alloc);
static void cl_env_exit(struct cl_env *cle)
{
	LASSERT(cle->ce_owner == NULL);
	lu_context_exit(&cle->ce_lu.le_ctx);
	lu_context_exit(&cle->ce_ses);
}
/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned cl_env_cache_purge(unsigned nr)
{
	struct cl_env *cle;

	spin_lock(&cl_envs_guard);
	for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
		cfs_list_del_init(&cle->ce_linkage);
		LASSERT(cl_envs_cached_nr > 0);
		cl_envs_cached_nr--;
		spin_unlock(&cl_envs_guard);

		cl_env_fini(cle);
		spin_lock(&cl_envs_guard);
	}
	LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
	spin_unlock(&cl_envs_guard);
	return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);
/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When counter drops to 0, nothing in
 * this thread is using environment and it is returned to the allocation
 * cache, or freed straight away, if cache is large enough.
 */
void cl_env_put(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle;

	cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);
	LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	if (--cle->ce_ref == 0) {
		CL_ENV_DEC(busy);
		cl_env_detach(cle);
		cle->ce_debug = NULL;
		cl_env_exit(cle);
		/*
		 * Don't bother to take a lock here.
		 *
		 * Return environment to the cache only when it was allocated
		 * with the standard tags.
		 */
		if (cl_envs_cached_nr < cl_envs_cached_max &&
		    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
		    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
			spin_lock(&cl_envs_guard);
			cfs_list_add(&cle->ce_linkage, &cl_envs);
			cl_envs_cached_nr++;
			spin_unlock(&cl_envs_guard);
		} else
			cl_env_fini(cle);
	}
}
EXPORT_SYMBOL(cl_env_put);
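
/*
 * Typical environment life-cycle (a sketch; variable names are
 * illustrative). The same \a refcheck must be passed to the matching
 * cl_env_put() so that leaked references can be detected:
 *
 *	struct lu_env *env;
 *	int refcheck;
 *
 *	env = cl_env_get(&refcheck);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	...
 *	cl_env_put(env, &refcheck);
 */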
/**
 * Declares a point of re-entrancy.
 *
 * \see cl_env_reexit()
 */
void *cl_env_reenter(void)
{
	return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);
/**
 * Exits re-entrancy.
 */
void cl_env_reexit(void *cookie)
{
	cl_env_detach(NULL);
	cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);
/**
 * Setup user-supplied \a env as a current environment. This is to be used
 * to guarantee that environment exists even when cl_env_get() fails. It is
 * up to user to ensure proper concurrency control.
 *
 * \see cl_env_unplant()
 */
void cl_env_implant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);

	cl_env_attach(cle);
	cl_env_get(refcheck);
	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);
/**
 * Detach environment installed earlier by cl_env_implant().
 */
void cl_env_unplant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 1);

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);

	cl_env_detach(cle);
	cl_env_put(env, refcheck);
}
EXPORT_SYMBOL(cl_env_unplant);
struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
{
	struct lu_env *env;

	nest->cen_cookie = NULL;
	env = cl_env_peek(&nest->cen_refcheck);
	if (env != NULL) {
		if (!cl_io_is_going(env))
			return env;
		cl_env_put(env, &nest->cen_refcheck);
		nest->cen_cookie = cl_env_reenter();
	}
	env = cl_env_get(&nest->cen_refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(nest->cen_cookie);
		return env;
	}

	LASSERT(!cl_io_is_going(env));
	return env;
}
EXPORT_SYMBOL(cl_env_nested_get);
void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
{
	cl_env_put(env, &nest->cen_refcheck);
	cl_env_reexit(nest->cen_cookie);
}
EXPORT_SYMBOL(cl_env_nested_put);
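
/*
 * Sketch of the nested pairing (illustrative only): used when the caller
 * may already be running inside an environment with an active cl_io and
 * needs a fresh one:
 *
 *	struct cl_env_nest nest;
 *	struct lu_env *env;
 *
 *	env = cl_env_nested_get(&nest);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	...
 *	cl_env_nested_put(&nest, env);
 */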
/**
 * Converts struct cl_attr to struct ost_lvb.
 *
 * \see cl_lvb2attr
 */
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
	lvb->lvb_size   = attr->cat_size;
	lvb->lvb_mtime  = attr->cat_mtime;
	lvb->lvb_atime  = attr->cat_atime;
	lvb->lvb_ctime  = attr->cat_ctime;
	lvb->lvb_blocks = attr->cat_blocks;
}
EXPORT_SYMBOL(cl_attr2lvb);
/**
 * Converts struct ost_lvb to struct cl_attr.
 *
 * \see cl_attr2lvb
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
	attr->cat_size   = lvb->lvb_size;
	attr->cat_mtime  = lvb->lvb_mtime;
	attr->cat_atime  = lvb->lvb_atime;
	attr->cat_ctime  = lvb->lvb_ctime;
	attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);
/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
				struct lu_device_type *ldt,
				struct lu_device *next)
{
	const char       *typename;
	struct lu_device *d;

	LASSERT(ldt != NULL);

	typename = ldt->ldt_name;
	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
	if (!IS_ERR(d)) {
		int rc;

		if (site != NULL)
			d->ld_site = site;
		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
		if (rc == 0) {
			lu_device_get(d);
			lu_ref_add(&d->ld_reference,
				   "lu-stack", &lu_site_init);
		} else {
			ldt->ldt_ops->ldto_device_free(env, d);
			CERROR("can't init device '%s', %d\n", typename, rc);
			d = ERR_PTR(rc);
		}
	} else
		CERROR("Cannot allocate device: '%s'\n", typename);
	return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
	lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);
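
/*
 * Illustrative pairing (a sketch, not code from this file; "cd" and the
 * error handling are assumptions): a device stack built with
 * cl_type_setup() is torn down with cl_stack_fini():
 *
 *	struct cl_device *cd;
 *
 *	cd = cl_type_setup(env, site, ldt, next);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	...
 *	cl_stack_fini(env, cd);
 */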
int  cl_lock_init(void);
void cl_lock_fini(void);

int  cl_page_init(void);
void cl_page_fini(void);
static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
	return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl0_key_{init,fini}() */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
static void *cl_key_init(const struct lu_context *ctx,
			 struct lu_context_key *key)
{
	struct cl_thread_info *info;

	info = cl0_key_init(ctx, key);
	if (!IS_ERR(info)) {
		int i;

		for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
			lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
	return info;
}
static void cl_key_fini(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info;
	int i;

	info = data;
	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
	cl0_key_fini(ctx, key, data);
}
static void cl_key_exit(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
		LASSERT(info->clt_counters[i].ctc_nr_held == 0);
		LASSERT(info->clt_counters[i].ctc_nr_used == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
		lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
}
static struct lu_context_key cl_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = cl_key_init,
	.lct_fini = cl_key_fini,
	.lct_exit = cl_key_exit
};
static struct lu_kmem_descr cl_object_caches[] = {
	{
		.ckd_cache = &cl_env_kmem,
		.ckd_name  = "cl_env_kmem",
		.ckd_size  = sizeof (struct cl_env)
	},
	{
		.ckd_cache = NULL
	}
};
/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
	int result;

	result = cl_env_store_init();
	if (result)
		return result;

	result = lu_kmem_init(cl_object_caches);
	if (result)
		goto out_store;

	LU_CONTEXT_KEY_INIT(&cl_key);
	result = lu_context_key_register(&cl_key);
	if (result)
		goto out_kmem;

	result = cl_lock_init();
	if (result)
		goto out_context;

	result = cl_page_init();
	if (result)
		goto out_lock;

	return 0;
out_lock:
	cl_lock_fini();
out_context:
	lu_context_key_degister(&cl_key);
out_kmem:
	lu_kmem_fini(cl_object_caches);
out_store:
	cl_env_store_fini();
	return result;
}
/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
	cl_lock_fini();
	cl_page_fini();
	lu_context_key_degister(&cl_key);
	lu_kmem_fini(cl_object_caches);
	cl_env_store_fini();
}