/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <libcfs/libcfs.h>
/* class_put_type() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
/* lu_time_global_{init,fini}() */
#include <lu_time.h>

#include <cl_object.h>
#include "cl_internal.h"

static cfs_mem_cache_t *cl_env_kmem;

/** Lock class of cl_object_header::coh_page_guard */
static cfs_lock_class_key_t cl_page_guard_class;
/** Lock class of cl_object_header::coh_lock_guard */
static cfs_lock_class_key_t cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static cfs_lock_class_key_t cl_attr_guard_class;

/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
        int result;

        ENTRY;
        result = lu_object_header_init(&h->coh_lu);
        if (result == 0) {
                cfs_spin_lock_init(&h->coh_page_guard);
                cfs_spin_lock_init(&h->coh_lock_guard);
                cfs_spin_lock_init(&h->coh_attr_guard);
                /* each guard gets its own lockdep class, not all three
                 * the attr guard's class */
                cfs_lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
                cfs_lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
                cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
                h->coh_pages = 0;
                /* XXX hard coded GFP_* mask. */
                INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
                CFS_INIT_LIST_HEAD(&h->coh_locks);
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_object_header_init);

/**
 * Finalize cl_object_header.
 */
void cl_object_header_fini(struct cl_object_header *h)
{
        LASSERT(cfs_list_empty(&h->coh_locks));
        lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);

/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either cached or newly created object. Additional reference on the
 * returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
                                 struct cl_device *cd, const struct lu_fid *fid,
                                 const struct cl_object_conf *c)
{
        cfs_might_sleep();
        return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);

/**
 * Releases a reference on \a o.
 *
 * When last reference is released object is returned to the cache, unless
 * lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
        lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);

/**
 * Acquire an additional reference to the object \a o.
 *
 * This can only be used to acquire _additional_ reference, i.e., caller
 * already has to possess at least one reference to \a o before calling this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
        lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);
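
/*
 * Illustrative usage sketch (not part of the original interface): a caller
 * that already owns a reference, as required above, takes an extra one
 * around a temporary use and pairs it with cl_object_put().
 * do_something_with() is a hypothetical helper.
 *
 * \code
 *      cl_object_get(obj);
 *      do_something_with(env, obj);
 *      cl_object_put(env, obj);
 * \endcode
 */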

/**
 * Returns the top-object for a given \a o.
 *
 * \see cl_page_top(), cl_io_top()
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
        struct cl_object_header *hdr = cl_object_header(o);
        struct cl_object *top;

        while (hdr->coh_parent != NULL)
                hdr = hdr->coh_parent;

        top = lu2cl(lu_object_top(&hdr->coh_lu));
        CDEBUG(D_TRACE, "%p -> %p\n", o, top);
        return top;
}
EXPORT_SYMBOL(cl_object_top);

/**
 * Returns pointer to the lock protecting data-attributes for the given object
 * \a o.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
        return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}

/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing, until lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get(), cl_object_attr_set().
 */
void cl_object_attr_lock(struct cl_object *o)
{
        cfs_spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);

/**
 * Releases data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
{
        cfs_spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
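
/*
 * Minimal usage sketch of the attribute locking protocol described above
 * (illustrative; \a env, \a obj and \a attr are assumed to exist in the
 * caller): reads and updates of data-attributes are bracketed by the
 * attribute lock of the top-object.
 *
 * \code
 *      int result;
 *
 *      cl_object_attr_lock(obj);
 *      result = cl_object_attr_get(env, obj, attr);
 *      cl_object_attr_unlock(obj);
 * \endcode
 */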

/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in parts of \a attr that this layer is responsible
 * for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
                       struct cl_attr *attr)
{
        struct lu_object_header *top;
        int result;

        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
        ENTRY;

        top = obj->co_lu.lo_header;
        result = 0;
        cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_attr_get != NULL) {
                        result = obj->co_ops->coo_attr_get(env, obj, attr);
                        if (result != 0) {
                                if (result > 0)
                                        result = 0;
                                break;
                        }
                }
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_object_attr_get);

/**
 * Updates data-attributes of an object \a obj.
 *
 * Only attributes, mentioned in a validness bit-mask \a v, are
 * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom
 * to top.
 */
int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
                       const struct cl_attr *attr, unsigned v)
{
        struct lu_object_header *top;
        int result;

        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
        ENTRY;

        top = obj->co_lu.lo_header;
        result = 0;
        cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
                                        co_lu.lo_linkage) {
                if (obj->co_ops->coo_attr_set != NULL) {
                        result = obj->co_ops->coo_attr_set(env, obj, attr, v);
                        if (result != 0) {
                                if (result > 0)
                                        result = 0;
                                break;
                        }
                }
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_object_attr_set);

/**
 * Notifies layers (bottom-to-top) that glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
                      struct ost_lvb *lvb)
{
        struct lu_object_header *top;
        int result;

        ENTRY;
        top = obj->co_lu.lo_header;
        result = 0;
        cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
                                        co_lu.lo_linkage) {
                if (obj->co_ops->coo_glimpse != NULL) {
                        result = obj->co_ops->coo_glimpse(env, obj, lvb);
                        if (result != 0)
                                break;
                }
        }
        LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
                         "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
                         "ctime: "LPU64" blocks: "LPU64"\n",
                         lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
                         lvb->lvb_ctime, lvb->lvb_blocks);
        RETURN(result);
}
EXPORT_SYMBOL(cl_object_glimpse);

/**
 * Updates a configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
                const struct cl_object_conf *conf)
{
        struct lu_object_header *top;
        int result;

        ENTRY;
        top = obj->co_lu.lo_header;
        result = 0;
        cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_conf_set != NULL) {
                        result = obj->co_ops->coo_conf_set(env, obj, conf);
                        if (result != 0)
                                break;
                }
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_conf_set);

/**
 * Helper function removing all object locks, and marking object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
        struct cl_object_header *hdr;

        hdr = cl_object_header(obj);
        LASSERT(hdr->coh_tree.rnode == NULL);
        LASSERT(hdr->coh_pages == 0);

        cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
        /*
         * Destroy all locks. Object destruction (including cl_inode_fini())
         * cannot cancel the locks, because in the case of a local client,
         * where client and server share the same thread running
         * prune_icache(), this can dead-lock with ldlm_cancel_handler()
         * waiting on __wait_on_freeing_inode().
         */
        cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);

/**
 * Prunes caches of pages and locks for this object.
 */
void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
        ENTRY;
        cl_pages_prune(env, obj);
        cl_locks_prune(env, obj, 1);
        EXIT;
}
EXPORT_SYMBOL(cl_object_prune);

/**
 * Check if the object has locks.
 */
int cl_object_has_locks(struct cl_object *obj)
{
        struct cl_object_header *head = cl_object_header(obj);
        int has;

        cfs_spin_lock(&head->coh_lock_guard);
        has = cfs_list_empty(&head->coh_locks);
        cfs_spin_unlock(&head->coh_lock_guard);

        return (has == 0);
}
EXPORT_SYMBOL(cl_object_has_locks);

void cache_stats_init(struct cache_stats *cs, const char *name)
{
        cs->cs_name = name;
        cfs_atomic_set(&cs->cs_lookup, 0);
        cfs_atomic_set(&cs->cs_hit, 0);
        cfs_atomic_set(&cs->cs_total, 0);
        cfs_atomic_set(&cs->cs_busy, 0);
        cfs_atomic_set(&cs->cs_created, 0);
}

int cache_stats_print(const struct cache_stats *cs,
                      char *page, int count, int h)
{
        int nob = 0;
/*
       lookup    hit  total cached create
  env: ...... ...... ...... ...... ......
 */
        if (h)
                nob += snprintf(page, count,
                                "       lookup    hit  total   busy create\n");

        nob += snprintf(page + nob, count - nob,
                        "%5.5s: %6u %6u %6u %6u %6u",
                        cs->cs_name,
                        cfs_atomic_read(&cs->cs_lookup),
                        cfs_atomic_read(&cs->cs_hit),
                        cfs_atomic_read(&cs->cs_total),
                        cfs_atomic_read(&cs->cs_busy),
                        cfs_atomic_read(&cs->cs_created));
        return nob;
}

/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
        int i;
        int result;

        result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
        if (result == 0) {
                cache_stats_init(&s->cs_pages, "pages");
                cache_stats_init(&s->cs_locks, "locks");
                for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
                        cfs_atomic_set(&s->cs_pages_state[i], 0);
                for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
                        cfs_atomic_set(&s->cs_locks_state[i], 0);
        }
        return result;
}
EXPORT_SYMBOL(cl_site_init);

/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
        lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);

static struct cache_stats cl_env_stats = {
        .cs_name    = "envs",
        .cs_created = CFS_ATOMIC_INIT(0),
        .cs_lookup  = CFS_ATOMIC_INIT(0),
        .cs_hit     = CFS_ATOMIC_INIT(0),
        .cs_total   = CFS_ATOMIC_INIT(0),
        .cs_busy    = CFS_ATOMIC_INIT(0)
};

/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, char *page, int count)
{
        int nob;
        int i;
        static const char *pstate[] = {
                [CPS_CACHED]  = "c",
                [CPS_OWNED]   = "o",
                [CPS_PAGEOUT] = "w",
                [CPS_PAGEIN]  = "r",
                [CPS_FREEING] = "f"
        };
        static const char *lstate[] = {
                [CLS_NEW]       = "n",
                [CLS_QUEUING]   = "q",
                [CLS_ENQUEUED]  = "e",
                [CLS_HELD]      = "h",
                [CLS_INTRANSIT] = "t",
                [CLS_CACHED]    = "c",
                [CLS_FREEING]   = "f"
        };
/*
       lookup    hit  total   busy create
pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
  env: ...... ...... ...... ...... ......
 */
        nob = lu_site_stats_print(&site->cs_lu, page, count);
        nob += cache_stats_print(&site->cs_pages, page + nob, count - nob, 1);
        nob += snprintf(page + nob, count - nob, " [");
        for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
                nob += snprintf(page + nob, count - nob, "%s: %u ",
                                pstate[i],
                                cfs_atomic_read(&site->cs_pages_state[i]));
        nob += snprintf(page + nob, count - nob, "]\n");
        nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
        nob += snprintf(page + nob, count - nob, " [");
        for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
                nob += snprintf(page + nob, count - nob, "%s: %u ",
                                lstate[i],
                                cfs_atomic_read(&site->cs_locks_state[i]));
        nob += snprintf(page + nob, count - nob, "]\n");
        nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
        nob += snprintf(page + nob, count - nob, "\n");
        return nob;
}
EXPORT_SYMBOL(cl_site_stats_print);

/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store the cl_env pointer in task-specific
 * structures. On Linux, it won't be easy to use task_struct->journal_info,
 * because Lustre code may call into other file systems, which have their own
 * assumptions about journal_info. Currently the following fields in
 * task_struct have been identified as usable for this purpose:
 *  - cl_env: for liblustre.
 *  - tux_info: only on RedHat kernels.
 *
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into another part of the kernel
 * that uses those fields in task_struct without explicitly exiting
 * Lustre.
 *
 * If no space in task_struct is available, the hash table will be used.
 */

static CFS_LIST_HEAD(cl_envs);
static unsigned cl_envs_cached_nr  = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
                                           * for now. */
static cfs_spinlock_t cl_envs_guard = CFS_SPIN_LOCK_UNLOCKED;

struct cl_env {
        void             *ce_magic;
        struct lu_env     ce_lu;
        struct lu_context ce_ses;

#ifdef LL_TASK_CL_ENV
        void             *ce_prev;
#else
        /**
         * This allows cl_env to be entered into cl_env_hash which implements
         * the current thread -> client environment lookup.
         */
        cfs_hlist_node_t  ce_node;
#endif
        /**
         * Owner for the current cl_env.
         *
         * If LL_TASK_CL_ENV is defined, this points to the owning
         * cfs_current(), and is used only for debugging purposes;
         * otherwise the hash is used, and this is the key for cfs_hash.
         * Now the current thread pid is stored. Note that using a thread
         * pointer would lead to an unbalanced hash because of its specific
         * allocation locality, which could vary across platforms, OSes and
         * even kernel versions.
         */
        void             *ce_owner;

        /*
         * Linkage into global list of all client environments. Used for
         * garbage collection.
         */
        cfs_list_t        ce_linkage;
        /*
         * Reference count of this environment.
         */
        int               ce_ref;
        /*
         * Debugging field: address of the caller who made original
         * allocation.
         */
        void             *ce_debug;
};

#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)

#define CL_ENV_DEC(counter)                                             \
        do {                                                            \
                LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0);    \
                cfs_atomic_dec(&cl_env_stats.counter);                  \
        } while (0)

static void cl_env_init0(struct cl_env *cle, void *debug)
{
        LASSERT(cle->ce_ref == 0);
        LASSERT(cle->ce_magic == &cl_env_init0);
        LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);

        cle->ce_ref = 1;
        cle->ce_debug = debug;
        CL_ENV_INC(cs_busy);
}

#ifndef LL_TASK_CL_ENV
/*
 * Implementation that uses a hash table to connect a cl_env with a thread.
 */
static cfs_hash_t *cl_env_hash;

static unsigned cl_env_hops_hash(cfs_hash_t *lh, void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
        return cfs_hash_u64_hash((__u64)key, mask);
#else
        return cfs_hash_u32_hash((__u32)key, mask);
#endif
}

static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
{
        struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
        LASSERT(cle->ce_magic == &cl_env_init0);
        return (void *)cle;
}

static int cl_env_hops_keycmp(void *key, cfs_hlist_node_t *hn)
{
        struct cl_env *cle = cl_env_hops_obj(hn);

        LASSERT(cle->ce_owner != NULL);
        return (key == cle->ce_owner);
}

static cfs_hash_ops_t cl_env_hops = {
        .hs_hash       = cl_env_hops_hash,
        .hs_key        = cl_env_hops_obj,
        .hs_keycmp     = cl_env_hops_keycmp,
        .hs_object     = cl_env_hops_obj,
        .hs_get        = cl_env_hops_obj,
        .hs_put_locked = cl_env_hops_obj,
};

static inline struct cl_env *cl_env_fetch(void)
{
        struct cl_env *cle;

        cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
        return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
        if (cle) {
                int rc;

                LASSERT(cle->ce_owner == NULL);
                cle->ce_owner = (void *) (long) cfs_current()->pid;
                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
                                         &cle->ce_node);
                LASSERT(rc == 0);
        }
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
        void *cookie;

        LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
                              &cle->ce_node);
        LASSERT(cookie == cle);
        cle->ce_owner = NULL;
}

static int cl_env_store_init(void) {
        cl_env_hash = cfs_hash_create("cl_env",
                                      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
                                      HASH_CL_ENV_BKT_BITS, 0,
                                      CFS_HASH_MIN_THETA,
                                      CFS_HASH_MAX_THETA,
                                      &cl_env_hops,
                                      CFS_HASH_RW_BKTLOCK);
        return cl_env_hash != NULL ? 0 : -ENOMEM;
}

static void cl_env_store_fini(void) {
        cfs_hash_putref(cl_env_hash);
}

#else /* LL_TASK_CL_ENV */
/*
 * Implementation that stores the cl_env directly in the thread structure.
 */
static inline struct cl_env *cl_env_fetch(void)
{
        struct cl_env *cle;

        cle = cfs_current()->LL_TASK_CL_ENV;
        if (cle && cle->ce_magic != &cl_env_init0)
                cle = NULL;
        return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
        if (cle) {
                LASSERT(cle->ce_owner == NULL);
                cle->ce_owner = cfs_current();
                cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
                cfs_current()->LL_TASK_CL_ENV = cle;
        }
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
        LASSERT(cle->ce_owner == cfs_current());
        LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
        cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
        cle->ce_owner = NULL;
}

static int cl_env_store_init(void) { return 0; }
static void cl_env_store_fini(void) { }

#endif /* LL_TASK_CL_ENV */

static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
        if (cle == NULL)
                cle = cl_env_fetch();

        if (cle && cle->ce_owner)
                cl_env_do_detach(cle);

        return cle;
}

static struct lu_env *cl_env_new(__u32 tags, void *debug)
{
        struct lu_env *env;
        struct cl_env *cle;

        OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO);
        if (cle != NULL) {
                int rc;

                CFS_INIT_LIST_HEAD(&cle->ce_linkage);
                cle->ce_magic = &cl_env_init0;
                env = &cle->ce_lu;
                rc = lu_env_init(env, LCT_CL_THREAD|tags);
                if (rc == 0) {
                        rc = lu_context_init(&cle->ce_ses, LCT_SESSION|tags);
                        if (rc == 0) {
                                lu_context_enter(&cle->ce_ses);
                                env->le_ses = &cle->ce_ses;
                                cl_env_init0(cle, debug);
                        } else
                                lu_env_fini(env);
                }
                if (rc != 0) {
                        OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
                        env = ERR_PTR(rc);
                } else {
                        CL_ENV_INC(cs_created);
                        CL_ENV_INC(cs_total);
                }
        } else
                env = ERR_PTR(-ENOMEM);
        return env;
}

static void cl_env_fini(struct cl_env *cle)
{
        CL_ENV_DEC(cs_total);
        lu_context_fini(&cle->ce_lu.le_ctx);
        lu_context_fini(&cle->ce_ses);
        OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
}

static struct lu_env *cl_env_obtain(void *debug)
{
        struct cl_env *cle;
        struct lu_env *env;

        ENTRY;
        cfs_spin_lock(&cl_envs_guard);
        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
        if (cl_envs_cached_nr > 0) {
                int rc;

                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
                cfs_list_del_init(&cle->ce_linkage);
                cl_envs_cached_nr--;
                cfs_spin_unlock(&cl_envs_guard);

                env = &cle->ce_lu;
                rc = lu_env_refill(env);
                if (rc == 0) {
                        cl_env_init0(cle, debug);
                        lu_context_enter(&env->le_ctx);
                        lu_context_enter(&cle->ce_ses);
                } else {
                        cl_env_fini(cle);
                        env = ERR_PTR(rc);
                }
        } else {
                cfs_spin_unlock(&cl_envs_guard);
                env = cl_env_new(0, debug);
        }
        RETURN(env);
}

static inline struct cl_env *cl_env_container(struct lu_env *env)
{
        return container_of(env, struct cl_env, ce_lu);
}

struct lu_env *cl_env_peek(int *refcheck)
{
        struct lu_env *env;
        struct cl_env *cle;

        CL_ENV_INC(cs_lookup);

        /* check that we don't go far from untrusted pointer */
        CLASSERT(offsetof(struct cl_env, ce_magic) == 0);

        env = NULL;
        cle = cl_env_fetch();
        if (cle != NULL) {
                CL_ENV_INC(cs_hit);
                env = &cle->ce_lu;
                *refcheck = ++cle->ce_ref;
        }
        CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
        return env;
}
EXPORT_SYMBOL(cl_env_peek);

/**
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned, otherwise, new environment is allocated.
 *
 * Allocations are amortized through the global cache of environments.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 *
 * \see cl_env_peek()
 */
struct lu_env *cl_env_get(int *refcheck)
{
        struct lu_env *env;

        env = cl_env_peek(refcheck);
        if (env == NULL) {
                env = cl_env_obtain(__builtin_return_address(0));
                if (!IS_ERR(env)) {
                        struct cl_env *cle;

                        cle = cl_env_container(env);
                        cl_env_attach(cle);
                        *refcheck = cle->ce_ref;
                        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
                }
        }
        return env;
}
EXPORT_SYMBOL(cl_env_get);
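
/*
 * Typical cl_env_get()/cl_env_put() pairing, following the \a refcheck
 * contract documented above (sketch; the work done with the environment
 * is elided): the same integer is passed to both calls, so a missed
 * cl_env_put() is caught by the reference-count check in cl_env_put().
 *
 * \code
 *      struct lu_env *env;
 *      int            refcheck;
 *
 *      env = cl_env_get(&refcheck);
 *      if (!IS_ERR(env)) {
 *              ...
 *              cl_env_put(env, &refcheck);
 *      }
 * \endcode
 */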

/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
        struct lu_env *env;

        LASSERT(cl_env_peek(refcheck) == NULL);
        env = cl_env_new(tags, __builtin_return_address(0));
        if (!IS_ERR(env)) {
                struct cl_env *cle;

                cle = cl_env_container(env);
                cl_env_attach(cle);
                *refcheck = cle->ce_ref;
                CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
        }
        return env;
}
EXPORT_SYMBOL(cl_env_alloc);

static void cl_env_exit(struct cl_env *cle)
{
        LASSERT(cle->ce_owner == NULL);
        lu_context_exit(&cle->ce_lu.le_ctx);
        lu_context_exit(&cle->ce_ses);
}

/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned cl_env_cache_purge(unsigned nr)
{
        struct cl_env *cle;

        ENTRY;
        cfs_spin_lock(&cl_envs_guard);
        for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
                cfs_list_del_init(&cle->ce_linkage);
                LASSERT(cl_envs_cached_nr > 0);
                cl_envs_cached_nr--;
                cfs_spin_unlock(&cl_envs_guard);

                cl_env_fini(cle);
                cfs_spin_lock(&cl_envs_guard);
        }
        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
        cfs_spin_unlock(&cl_envs_guard);
        RETURN(nr);
}
EXPORT_SYMBOL(cl_env_cache_purge);

/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When counter drops to 0, nothing in
 * this thread is using environment and it is returned to the allocation
 * cache, or freed straight away, if cache is large enough.
 */
void cl_env_put(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle;

        cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 0);
        LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));

        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
        if (--cle->ce_ref == 0) {
                CL_ENV_DEC(cs_busy);
                cl_env_detach(cle);
                cle->ce_debug = NULL;
                cl_env_exit(cle);
                /*
                 * Don't bother to take a lock here.
                 *
                 * Return environment to the cache only when it was allocated
                 * with the standard tags.
                 */
                if (cl_envs_cached_nr < cl_envs_cached_max &&
                    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
                    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
                        cfs_spin_lock(&cl_envs_guard);
                        cfs_list_add(&cle->ce_linkage, &cl_envs);
                        cl_envs_cached_nr++;
                        cfs_spin_unlock(&cl_envs_guard);
                } else
                        cl_env_fini(cle);
        }
}
EXPORT_SYMBOL(cl_env_put);

/**
 * Declares a point of re-entrancy.
 *
 * \see cl_env_reexit()
 */
void *cl_env_reenter(void)
{
        return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);

/**
 * Exits re-entrancy.
 */
void cl_env_reexit(void *cookie)
{
        cl_env_detach(NULL);
        cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);

/**
 * Setup user-supplied \a env as a current environment. This is to be used
 * to guarantee that environment exists even when cl_env_get() fails. It is
 * up to user to ensure proper concurrency control.
 *
 * \see cl_env_unplant()
 */
void cl_env_implant(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 0);

        cl_env_attach(cle);
        cl_env_get(refcheck);
        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);

/**
 * Detach environment installed earlier by cl_env_implant().
 */
void cl_env_unplant(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 1);

        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);

        cl_env_detach(cle);
        cl_env_put(env, refcheck);
}
EXPORT_SYMBOL(cl_env_unplant);

struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
{
        struct lu_env *env;

        nest->cen_cookie = NULL;
        env = cl_env_peek(&nest->cen_refcheck);
        if (env != NULL) {
                if (!cl_io_is_going(env))
                        return env;
                else {
                        cl_env_put(env, &nest->cen_refcheck);
                        nest->cen_cookie = cl_env_reenter();
                }
        }
        env = cl_env_get(&nest->cen_refcheck);
        if (IS_ERR(env)) {
                cl_env_reexit(nest->cen_cookie);
                return env;
        }

        LASSERT(!cl_io_is_going(env));
        return env;
}
EXPORT_SYMBOL(cl_env_nested_get);

void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
{
        cl_env_put(env, &nest->cen_refcheck);
        cl_env_reexit(nest->cen_cookie);
}
EXPORT_SYMBOL(cl_env_nested_put);
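
/*
 * Sketch of nested-environment usage (illustrative): code that can be
 * entered while a cl_io is already going on in the current environment
 * obtains a fresh environment through cl_env_nested_get() and restores
 * the outer one with cl_env_nested_put().
 *
 * \code
 *      struct cl_env_nest nest;
 *      struct lu_env     *env;
 *
 *      env = cl_env_nested_get(&nest);
 *      if (!IS_ERR(env)) {
 *              ...
 *              cl_env_nested_put(&nest, env);
 *      }
 * \endcode
 */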

/**
 * Converts struct cl_attr to struct ost_lvb.
 *
 * \see cl_lvb2attr
 */
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
        ENTRY;
        lvb->lvb_size   = attr->cat_size;
        lvb->lvb_mtime  = attr->cat_mtime;
        lvb->lvb_atime  = attr->cat_atime;
        lvb->lvb_ctime  = attr->cat_ctime;
        lvb->lvb_blocks = attr->cat_blocks;
        EXIT;
}
EXPORT_SYMBOL(cl_attr2lvb);

/**
 * Converts struct ost_lvb to struct cl_attr.
 *
 * \see cl_attr2lvb
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
        ENTRY;
        attr->cat_size   = lvb->lvb_size;
        attr->cat_mtime  = lvb->lvb_mtime;
        attr->cat_atime  = lvb->lvb_atime;
        attr->cat_ctime  = lvb->lvb_ctime;
        attr->cat_blocks = lvb->lvb_blocks;
        EXIT;
}
EXPORT_SYMBOL(cl_lvb2attr);

/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
                                struct lu_device_type *ldt,
                                struct lu_device *next)
{
        const char       *typename;
        struct lu_device *d;

        LASSERT(ldt != NULL);

        typename = ldt->ldt_name;
        d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
        if (!IS_ERR(d)) {
                int rc;

                if (site != NULL)
                        d->ld_site = site;
                rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
                if (rc == 0) {
                        lu_device_get(d);
                        lu_ref_add(&d->ld_reference,
                                   "lu-stack", &lu_site_init);
                } else {
                        ldt->ldt_ops->ldto_device_free(env, d);
                        CERROR("can't init device '%s', %d\n", typename, rc);
                        d = ERR_PTR(rc);
                }
        } else
                CERROR("Cannot allocate device: '%s'\n", typename);
        return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);

/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
        lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);
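
/*
 * Illustrative pairing (a sketch, not taken from this file): a device
 * stack built with cl_type_setup() is eventually torn down with
 * cl_stack_fini().
 *
 * \code
 *      struct cl_device *cd;
 *
 *      cd = cl_type_setup(env, site, ldt, next);
 *      if (!IS_ERR(cd)) {
 *              ...
 *              cl_stack_fini(env, cd);
 *      }
 * \endcode
 */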

int  cl_lock_init(void);
void cl_lock_fini(void);

int  cl_page_init(void);
void cl_page_fini(void);

static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
        return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl0_key_{init,fini}() */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);

static void *cl_key_init(const struct lu_context *ctx,
                         struct lu_context_key *key)
{
        struct cl_thread_info *info;

        info = cl0_key_init(ctx, key);
        if (!IS_ERR(info)) {
                int i;

                for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                        lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
        }
        return info;
}

static void cl_key_fini(const struct lu_context *ctx,
                        struct lu_context_key *key, void *data)
{
        struct cl_thread_info *info;
        int i;

        info = data;
        for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
        cl0_key_fini(ctx, key, data);
}

static void cl_key_exit(const struct lu_context *ctx,
                        struct lu_context_key *key, void *data)
{
        struct cl_thread_info *info = data;
        int i;

        for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
                LASSERT(info->clt_counters[i].ctc_nr_held == 0);
                LASSERT(info->clt_counters[i].ctc_nr_used == 0);
                LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
                LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
                lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
        }
}

static struct lu_context_key cl_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = cl_key_init,
        .lct_fini = cl_key_fini,
        .lct_exit = cl_key_exit
};

static struct lu_kmem_descr cl_object_caches[] = {
        {
                .ckd_cache = &cl_env_kmem,
                .ckd_name  = "cl_env_kmem",
                .ckd_size  = sizeof (struct cl_env)
        },
        {
                .ckd_cache = NULL
        }
};

/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
        int result;

        result = cl_env_store_init();
        if (result)
                return result;

        result = lu_kmem_init(cl_object_caches);
        if (result == 0) {
                LU_CONTEXT_KEY_INIT(&cl_key);
                result = lu_context_key_register(&cl_key);
                if (result == 0) {
                        result = cl_lock_init();
                        if (result == 0)
                                result = cl_page_init();
                }
        }
        if (result != 0)
                cl_env_store_fini();
        return result;
}

/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
        cl_lock_fini();
        cl_page_fini();
        lu_context_key_degister(&cl_key);
        lu_kmem_fini(cl_object_caches);
        cl_env_store_fini();
}