1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * Client Lustre Object.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
52 #define DEBUG_SUBSYSTEM S_CLASS
53 #ifndef EXPORT_SYMTAB
54 # define EXPORT_SYMTAB
55 #endif
57 #include <libcfs/libcfs.h>
58 /* class_put_type() */
59 #include <obd_class.h>
60 #include <obd_support.h>
61 #include <lustre_fid.h>
62 #include <libcfs/list.h>
63 /* lu_time_global_{init,fini}() */
64 #include <lu_time.h>
66 #include <cl_object.h>
67 #include "cl_internal.h"
69 static cfs_mem_cache_t *cl_env_kmem;
71 /** Lock class of cl_object_header::coh_page_guard */
72 static struct lock_class_key cl_page_guard_class;
73 /** Lock class of cl_object_header::coh_lock_guard */
74 static struct lock_class_key cl_lock_guard_class;
75 /** Lock class of cl_object_header::coh_attr_guard */
76 static struct lock_class_key cl_attr_guard_class;
79 * Initialize cl_object_header.
81 int cl_object_header_init(struct cl_object_header *h)
86 result = lu_object_header_init(&h->coh_lu);
88 spin_lock_init(&h->coh_page_guard);
89 spin_lock_init(&h->coh_lock_guard);
90 spin_lock_init(&h->coh_attr_guard);
91 lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
92 lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
93 lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
95 /* XXX hard coded GFP_* mask. */
96 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
97 CFS_INIT_LIST_HEAD(&h->coh_locks);
101 EXPORT_SYMBOL(cl_object_header_init);
104 * Finalize cl_object_header.
106 void cl_object_header_fini(struct cl_object_header *h)
108 LASSERT(list_empty(&h->coh_locks));
109 lu_object_header_fini(&h->coh_lu);
111 EXPORT_SYMBOL(cl_object_header_fini);
114 * Returns a cl_object with a given \a fid.
116 * Returns either a cached or a newly created object. An additional reference
117 * on the returned object is acquired.
119 * \see lu_object_find(), cl_page_find(), cl_lock_find()
121 struct cl_object *cl_object_find(const struct lu_env *env,
122 struct cl_device *cd, const struct lu_fid *fid,
123 const struct cl_object_conf *c)
126 return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
128 EXPORT_SYMBOL(cl_object_find);
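/*
 * A minimal usage sketch (the variable names below are illustrative, not part
 * of the API): the reference returned by cl_object_find() must be dropped
 * with cl_object_put() once the caller is done with the object.
 *
 *      struct cl_object *obj;
 *
 *      obj = cl_object_find(env, cd, fid, conf);
 *      if (!IS_ERR(obj)) {
 *              ... use obj ...
 *              cl_object_put(env, obj);
 *      }
 */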
131 * Releases a reference on \a o.
133 * When the last reference is released, the object is returned to the cache,
134 * unless the lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header.
136 * \see cl_page_put(), cl_lock_put().
138 void cl_object_put(const struct lu_env *env, struct cl_object *o)
140 lu_object_put(env, &o->co_lu);
142 EXPORT_SYMBOL(cl_object_put);
145 * Acquire an additional reference to the object \a o.
147 * This can only be used to acquire an _additional_ reference, i.e., the caller
148 * must already possess at least one reference to \a o before calling this.
150 * \see cl_page_get(), cl_lock_get().
152 void cl_object_get(struct cl_object *o)
154 lu_object_get(&o->co_lu);
156 EXPORT_SYMBOL(cl_object_get);
159 * Returns the top-object for a given \a o.
161 * \see cl_page_top(), cl_io_top()
163 struct cl_object *cl_object_top(struct cl_object *o)
165 struct cl_object_header *hdr = cl_object_header(o);
166 struct cl_object *top;
168 while (hdr->coh_parent != NULL)
169 hdr = hdr->coh_parent;
171 top = lu2cl(lu_object_top(&hdr->coh_lu));
172 CDEBUG(D_TRACE, "%p -> %p\n", o, top);
175 EXPORT_SYMBOL(cl_object_top);
178 * Returns a pointer to the lock protecting data-attributes for the given object.
181 * Data-attributes are protected by the cl_object_header::coh_attr_guard
182 * spin-lock in the top-object.
184 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
186 static spinlock_t *cl_object_attr_guard(struct cl_object *o)
188 return &cl_object_header(cl_object_top(o))->coh_attr_guard;
192 * Locks data-attributes.
194 * Prevents data-attributes from changing until the lock is released by
195 * cl_object_attr_unlock(). This has to be called before calls to
196 * cl_object_attr_get() and cl_object_attr_set().
198 void cl_object_attr_lock(struct cl_object *o)
200 spin_lock(cl_object_attr_guard(o));
202 EXPORT_SYMBOL(cl_object_attr_lock);
205 * Releases data-attributes lock, acquired by cl_object_attr_lock().
207 void cl_object_attr_unlock(struct cl_object *o)
209 spin_unlock(cl_object_attr_guard(o));
211 EXPORT_SYMBOL(cl_object_attr_unlock);
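/*
 * The expected calling pattern, sketched below (the attribute handling shown
 * is illustrative): reads and writes of data-attributes are bracketed by the
 * attribute lock of the top-object.
 *
 *      struct cl_attr attr;
 *
 *      cl_object_attr_lock(obj);
 *      result = cl_object_attr_get(env, obj, &attr);
 *      cl_object_attr_unlock(obj);
 */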
214 * Returns data-attributes of an object \a obj.
216 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
217 * top-to-bottom to fill in the parts of \a attr that this layer is responsible for.
220 int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
221 struct cl_attr *attr)
223 struct lu_object_header *top;
226 LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
229 top = obj->co_lu.lo_header;
231 list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
232 if (obj->co_ops->coo_attr_get != NULL) {
233 result = obj->co_ops->coo_attr_get(env, obj, attr);
243 EXPORT_SYMBOL(cl_object_attr_get);
246 * Updates data-attributes of an object \a obj.
248 * Only the attributes mentioned in the validity bit-mask \a v are
249 * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom-to-top.
252 int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
253 const struct cl_attr *attr, unsigned v)
255 struct lu_object_header *top;
258 LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
261 top = obj->co_lu.lo_header;
263 list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
264 if (obj->co_ops->coo_attr_set != NULL) {
265 result = obj->co_ops->coo_attr_set(env, obj, attr, v);
275 EXPORT_SYMBOL(cl_object_attr_set);
278 * Notifies layers (bottom-to-top) that a glimpse AST was received.
280 * Layers have to fill \a lvb fields with information that will be shipped
281 * back to the glimpse issuer.
283 * \see cl_lock_operations::clo_glimpse()
285 int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
288 struct lu_object_header *top;
292 top = obj->co_lu.lo_header;
294 list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
295 if (obj->co_ops->coo_glimpse != NULL) {
296 result = obj->co_ops->coo_glimpse(env, obj, lvb);
301 LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
302 "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
303 "ctime: "LPU64" blocks: "LPU64"\n",
304 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
305 lvb->lvb_ctime, lvb->lvb_blocks);
308 EXPORT_SYMBOL(cl_object_glimpse);
311 * Updates the configuration of an object \a obj.
313 int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
314 const struct cl_object_conf *conf)
316 struct lu_object_header *top;
320 top = obj->co_lu.lo_header;
322 list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
323 if (obj->co_ops->coo_conf_set != NULL) {
324 result = obj->co_ops->coo_conf_set(env, obj, conf);
331 EXPORT_SYMBOL(cl_conf_set);
334 * Helper function removing all object locks and marking the object for
335 * deletion. All object pages must have been deleted at this point.
337 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
338 * and sub-objects, respectively.
340 void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
342 struct cl_object_header *hdr;
344 hdr = cl_object_header(obj);
345 LASSERT(hdr->coh_tree.rnode == NULL);
346 LASSERT(hdr->coh_pages == 0);
348 set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
350 * Destroy all locks. Object destruction (including cl_inode_fini())
351 * cannot cancel the locks, because in the case of a local client,
352 * where client and server share the same thread running
353 * prune_icache(), this can dead-lock with ldlm_cancel_handler()
354 * waiting on __wait_on_freeing_inode().
356 cl_locks_prune(env, obj, 0);
358 EXPORT_SYMBOL(cl_object_kill);
361 * Prunes caches of pages and locks for this object.
363 void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
366 cl_pages_prune(env, obj);
367 cl_locks_prune(env, obj, 1);
370 EXPORT_SYMBOL(cl_object_prune);
372 void cache_stats_init(struct cache_stats *cs, const char *name)
375 atomic_set(&cs->cs_lookup, 0);
376 atomic_set(&cs->cs_hit, 0);
377 atomic_set(&cs->cs_total, 0);
378 atomic_set(&cs->cs_busy, 0);
381 int cache_stats_print(const struct cache_stats *cs,
382 char *page, int count, int h)
386 lookup hit total cached create
387 env: ...... ...... ...... ...... ......
390 nob += snprintf(page, count,
391 " lookup hit total busy create\n");
393 nob += snprintf(page + nob, count - nob,
394 "%5.5s: %6u %6u %6u %6u %6u",
396 atomic_read(&cs->cs_lookup),
397 atomic_read(&cs->cs_hit),
398 atomic_read(&cs->cs_total),
399 atomic_read(&cs->cs_busy),
400 atomic_read(&cs->cs_created));
405 * Initialize client site.
407 * Perform common initialization (lu_site_init()), and initialize statistical
408 * counters. Also perform global initializations on the first call.
410 int cl_site_init(struct cl_site *s, struct cl_device *d)
415 result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
417 cache_stats_init(&s->cs_pages, "pages");
418 cache_stats_init(&s->cs_locks, "locks");
419 for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
420 atomic_set(&s->cs_pages_state[i], 0);
421 for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
422 atomic_set(&s->cs_locks_state[i], 0);
426 EXPORT_SYMBOL(cl_site_init);
429 * Finalize client site. Dual to cl_site_init().
431 void cl_site_fini(struct cl_site *s)
433 lu_site_fini(&s->cs_lu);
435 EXPORT_SYMBOL(cl_site_fini);
437 static struct cache_stats cl_env_stats = {
439 .cs_created = ATOMIC_INIT(0),
440 .cs_lookup = ATOMIC_INIT(0),
441 .cs_hit = ATOMIC_INIT(0),
442 .cs_total = ATOMIC_INIT(0),
443 .cs_busy = ATOMIC_INIT(0)
447 * Outputs client site statistical counters into a buffer. Suitable for
448 * ll_rd_*()-style functions.
450 int cl_site_stats_print(const struct cl_site *site, char *page, int count)
454 static const char *pstate[] = {
461 static const char *lstate[] = {
464 [CLS_ENQUEUED] = "e",
466 [CLS_UNLOCKING] = "u",
471 lookup hit total busy create
472 pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
473 locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
474 env: ...... ...... ...... ...... ......
476 nob = lu_site_stats_print(&site->cs_lu, page, count);
477 nob += cache_stats_print(&site->cs_pages, page + nob, count - nob, 1);
478 nob += snprintf(page + nob, count - nob, " [");
479 for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
480 nob += snprintf(page + nob, count - nob, "%s: %u ",
482 atomic_read(&site->cs_pages_state[i]));
483 nob += snprintf(page + nob, count - nob, "]\n");
484 nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
485 nob += snprintf(page + nob, count - nob, " [");
486 for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
487 nob += snprintf(page + nob, count - nob, "%s: %u ",
489 atomic_read(&site->cs_locks_state[i]));
490 nob += snprintf(page + nob, count - nob, "]\n");
491 nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
492 nob += snprintf(page + nob, count - nob, "\n");
495 EXPORT_SYMBOL(cl_site_stats_print);
497 /*****************************************************************************
499 * lu_env handling on client.
506 * XXX: this assumes that re-entrant file system calls (e.g., ->writepage())
507 * do not modify already existing current->journal_info.
510 static CFS_LIST_HEAD(cl_envs);
511 static unsigned cl_envs_cached_nr = 0;
512 static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit for now */
514 static spinlock_t cl_envs_guard = SPIN_LOCK_UNLOCKED;
519 struct lu_context ce_ses;
521 * Linkage into global list of all client environments. Used for
522 * garbage collection.
524 struct list_head ce_linkage;
531 * Debugging field: address of the caller who made the original call.
538 #define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.counter)
540 #define CL_ENV_DEC(counter) \
541 do { \
542 LASSERT(atomic_read(&cl_env_stats.counter) > 0); \
543 atomic_dec(&cl_env_stats.counter); \
544 } while (0)
546 static void cl_env_init0(struct cl_env *cle, void *debug)
548 LASSERT(cle->ce_ref == 0);
549 LASSERT(cle->ce_magic == &cl_env_init0);
550 LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
553 cle->ce_prev = current->journal_info;
554 cle->ce_debug = debug;
555 cle->ce_owner = current;
556 current->journal_info = cle;
560 static struct lu_env *cl_env_new(__u32 tags, void *debug)
565 OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO);
569 CFS_INIT_LIST_HEAD(&cle->ce_linkage);
570 cle->ce_magic = &cl_env_init0;
572 rc = lu_env_init(env, LCT_CL_THREAD|tags);
574 rc = lu_context_init(&cle->ce_ses, LCT_SESSION|tags);
576 lu_context_enter(&cle->ce_ses);
577 env->le_ses = &cle->ce_ses;
578 cl_env_init0(cle, debug);
583 OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
586 CL_ENV_INC(cs_created);
587 CL_ENV_INC(cs_total);
590 env = ERR_PTR(-ENOMEM);
594 static void cl_env_fini(struct cl_env *cle)
596 CL_ENV_DEC(cs_total);
597 lu_context_fini(&cle->ce_lu.le_ctx);
598 lu_context_fini(&cle->ce_ses);
599 OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
602 static struct lu_env *cl_env_obtain(void *debug)
608 spin_lock(&cl_envs_guard);
609 LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
610 if (cl_envs_cached_nr > 0) {
613 cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
614 list_del_init(&cle->ce_linkage);
616 spin_unlock(&cl_envs_guard);
619 rc = lu_env_refill(env);
621 cl_env_init0(cle, debug);
622 lu_context_enter(&env->le_ctx);
623 lu_context_enter(&cle->ce_ses);
629 spin_unlock(&cl_envs_guard);
630 env = cl_env_new(0, debug);
635 static inline struct cl_env *cl_env_container(struct lu_env *env)
637 return container_of(env, struct cl_env, ce_lu);
640 struct lu_env *cl_env_peek(int *refcheck)
645 CL_ENV_INC(cs_lookup);
647 /* check that we don't go far from untrusted pointer */
648 CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
651 cle = current->journal_info;
652 if (cle != NULL && cle->ce_magic == &cl_env_init0) {
655 *refcheck = ++cle->ce_ref;
657 CDEBUG(D_OTHER, "%i@%p\n", cle ? cle->ce_ref : 0, cle);
660 EXPORT_SYMBOL(cl_env_peek);
663 * Returns lu_env: if there already is an environment associated with the
664 * current thread, it is returned; otherwise, a new environment is allocated.
666 * Allocations are amortized through the global cache of environments.
668 * \param refcheck pointer to a counter used to detect environment leaks. In
669 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
670 * scope and pointer to the same integer is passed as \a refcheck. This is
671 * used to detect missed cl_env_put().
675 struct lu_env *cl_env_get(int *refcheck)
679 env = cl_env_peek(refcheck);
681 env = cl_env_obtain(__builtin_return_address(0));
685 cle = cl_env_container(env);
686 *refcheck = cle->ce_ref;
687 CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
692 EXPORT_SYMBOL(cl_env_get);
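/*
 * Typical usage, as described above (a sketch; the surrounding error handling
 * is illustrative): cl_env_get() and cl_env_put() are paired in the same
 * lexical scope and share the same refcheck integer.
 *
 *      struct lu_env *env;
 *      int            refcheck;
 *
 *      env = cl_env_get(&refcheck);
 *      if (IS_ERR(env))
 *              return PTR_ERR(env);
 *      ... use env ...
 *      cl_env_put(env, &refcheck);
 */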
695 * Forces an allocation of a fresh environment with given tags.
699 struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
703 LASSERT(cl_env_peek(refcheck) == NULL);
704 env = cl_env_new(tags, __builtin_return_address(0));
708 cle = cl_env_container(env);
709 *refcheck = cle->ce_ref;
710 CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
714 EXPORT_SYMBOL(cl_env_alloc);
716 static void cl_env_exit(struct cl_env *cle)
718 lu_context_exit(&cle->ce_lu.le_ctx);
719 lu_context_exit(&cle->ce_ses);
723 * Finalizes and frees a given number of cached environments. This is done to
724 * (1) free some memory (not currently hooked into VM), or (2) release
725 * references to modules.
727 unsigned cl_env_cache_purge(unsigned nr)
732 spin_lock(&cl_envs_guard);
733 for (; !list_empty(&cl_envs) && nr > 0; --nr) {
734 cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
735 list_del_init(&cle->ce_linkage);
736 LASSERT(cl_envs_cached_nr > 0);
738 spin_unlock(&cl_envs_guard);
741 spin_lock(&cl_envs_guard);
743 LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
744 spin_unlock(&cl_envs_guard);
747 EXPORT_SYMBOL(cl_env_cache_purge);
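/*
 * For example (an illustrative call, not mandated by the interface), a
 * shutdown path can drain the whole cache by passing a number no smaller than
 * the number of cached environments:
 *
 *      cl_env_cache_purge(~0);
 */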
750 * Release an environment.
752 * Decrement the \a env reference counter. When the counter drops to 0, nothing in
753 * this thread is using the environment and it is returned to the allocation
754 * cache, or freed immediately if the cache is already full.
756 void cl_env_put(struct lu_env *env, int *refcheck)
760 cle = cl_env_container(env);
762 LASSERT(cle->ce_ref > 0);
763 LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
765 CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
766 if (--cle->ce_ref == 0) {
768 current->journal_info = cle->ce_prev;
769 LASSERT(cle->ce_prev == NULL ||
770 cl_env_container(cle->ce_prev)->ce_magic !=
771 &cl_env_init0);
772 cle->ce_debug = NULL;
773 cle->ce_owner = NULL;
776 * Don't bother to take a lock here.
778 * Return environment to the cache only when it was allocated
779 * with the standard tags.
781 if (cl_envs_cached_nr < cl_envs_cached_max &&
782 (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
783 (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
784 spin_lock(&cl_envs_guard);
785 list_add(&cle->ce_linkage, &cl_envs);
787 spin_unlock(&cl_envs_guard);
792 EXPORT_SYMBOL(cl_env_put);
795 * Declares a point of re-entrancy.
797 * In the Linux kernel, environments are attached to the thread through the
798 * current->journal_info pointer, which is also used by other sub-systems. When
799 * Lustre code is invoked in a situation where current->journal_info is
800 * potentially already set, cl_env_reenter() is called to save the
801 * current->journal_info value, so that the current->journal_info field can be
802 * used to store a pointer to the environment.
804 * \see cl_env_reexit()
806 void *cl_env_reenter(void)
810 cookie = current->journal_info;
811 current->journal_info = NULL;
812 CDEBUG(D_OTHER, "cookie: %p\n", cookie);
815 EXPORT_SYMBOL(cl_env_reenter);
820 * This restores the old value of current->journal_info that was saved by
821 * cl_env_reenter().
823 void cl_env_reexit(void *cookie)
825 current->journal_info = cookie;
826 CDEBUG(D_OTHER, "cookie: %p\n", cookie);
828 EXPORT_SYMBOL(cl_env_reexit);
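/*
 * The two calls above are meant to bracket a re-entrant region (a sketch; the
 * enclosed code is hypothetical):
 *
 *      void *cookie;
 *
 *      cookie = cl_env_reenter();
 *      ... code that may allocate and use its own environment ...
 *      cl_env_reexit(cookie);
 */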
831 * Set up a user-supplied \a env as the current environment. This is used to
832 * guarantee that an environment exists even when cl_env_get() fails. It is up
833 * to the user to ensure proper concurrency control.
835 * \see cl_env_unplant()
837 void cl_env_implant(struct lu_env *env, int *refcheck)
839 struct cl_env *cle = cl_env_container(env);
841 LASSERT(current->journal_info == NULL);
842 LASSERT(cle->ce_ref > 0);
844 current->journal_info = cle;
845 cl_env_get(refcheck);
846 CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
848 EXPORT_SYMBOL(cl_env_implant);
851 * Detach environment installed earlier by cl_env_implant().
853 void cl_env_unplant(struct lu_env *env, int *refcheck)
855 struct cl_env *cle = cl_env_container(env);
857 LASSERT(cle == current->journal_info);
858 LASSERT(cle->ce_ref > 1);
860 CDEBUG(D_OTHER, "%i@%p\n", cle->ce_ref, cle);
862 cl_env_put(env, refcheck);
863 current->journal_info = NULL;
865 EXPORT_SYMBOL(cl_env_unplant);
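/*
 * Implant and unplant are used as a pair around the region where the
 * caller-owned environment should be visible to cl_env_get() (a sketch; the
 * refcheck variable belongs to the caller):
 *
 *      int refcheck;
 *
 *      cl_env_implant(env, &refcheck);
 *      ... code that obtains the environment via cl_env_get() ...
 *      cl_env_unplant(env, &refcheck);
 */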
867 struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
871 nest->cen_cookie = NULL;
872 env = cl_env_peek(&nest->cen_refcheck);
874 if (!cl_io_is_going(env))
877 cl_env_put(env, &nest->cen_refcheck);
878 nest->cen_cookie = cl_env_reenter();
881 env = cl_env_get(&nest->cen_refcheck);
882 LASSERT(ergo(!IS_ERR(env), !cl_io_is_going(env)));
885 EXPORT_SYMBOL(cl_env_nested_get);
887 void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
889 cl_env_put(env, &nest->cen_refcheck);
890 cl_env_reexit(nest->cen_cookie);
892 EXPORT_SYMBOL(cl_env_nested_put);
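/*
 * Sketch of the nested pattern (the work done inside is hypothetical):
 * cl_env_nested_get() either re-uses the current thread's environment, when no
 * IO is in progress on it, or transparently switches to a fresh one, and
 * cl_env_nested_put() undoes whichever happened.
 *
 *      struct cl_env_nest nest;
 *      struct lu_env     *env;
 *
 *      env = cl_env_nested_get(&nest);
 *      if (!IS_ERR(env)) {
 *              ... nested work ...
 *              cl_env_nested_put(&nest, env);
 *      }
 */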
895 * Converts struct cl_attr to struct ost_lvb.
899 void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
902 lvb->lvb_size = attr->cat_size;
903 lvb->lvb_mtime = attr->cat_mtime;
904 lvb->lvb_atime = attr->cat_atime;
905 lvb->lvb_ctime = attr->cat_ctime;
906 lvb->lvb_blocks = attr->cat_blocks;
909 EXPORT_SYMBOL(cl_attr2lvb);
912 * Converts struct ost_lvb to struct cl_attr.
916 void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
919 attr->cat_size = lvb->lvb_size;
920 attr->cat_mtime = lvb->lvb_mtime;
921 attr->cat_atime = lvb->lvb_atime;
922 attr->cat_ctime = lvb->lvb_ctime;
923 attr->cat_blocks = lvb->lvb_blocks;
926 EXPORT_SYMBOL(cl_lvb2attr);
929 /*****************************************************************************
931 * Temporary prototype thing: mirror obd-devices into cl devices.
935 struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
936 struct lu_device_type *ldt,
937 struct lu_device *next)
939 const char *typename;
942 LASSERT(ldt != NULL);
944 typename = ldt->ldt_name;
945 d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
951 rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
954 lu_ref_add(&d->ld_reference,
955 "lu-stack", &lu_site_init);
957 ldt->ldt_ops->ldto_device_free(env, d);
958 CERROR("can't init device '%s', %d\n", typename, rc);
962 CERROR("Cannot allocate device: '%s'\n", typename);
965 EXPORT_SYMBOL(cl_type_setup);
968 * Finalize device stack by calling lu_stack_fini().
970 void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
972 lu_stack_fini(env, cl2lu_dev(cl));
974 EXPORT_SYMBOL(cl_stack_fini);
976 int cl_lock_init(void);
977 void cl_lock_fini(void);
979 int cl_page_init(void);
980 void cl_page_fini(void);
982 static struct lu_context_key cl_key;
984 struct cl_thread_info *cl_env_info(const struct lu_env *env)
986 return lu_context_key_get(&env->le_ctx, &cl_key);
989 /* defines cl0_key_{init,fini}() */
990 LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
992 static void *cl_key_init(const struct lu_context *ctx,
993 struct lu_context_key *key)
995 struct cl_thread_info *info;
997 info = cl0_key_init(ctx, key);
1001 for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
1002 lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
1007 static void cl_key_fini(const struct lu_context *ctx,
1008 struct lu_context_key *key, void *data)
1010 struct cl_thread_info *info;
1014 for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
1015 lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
1016 cl0_key_fini(ctx, key, data);
1019 static void cl_key_exit(const struct lu_context *ctx,
1020 struct lu_context_key *key, void *data)
1022 struct cl_thread_info *info = data;
1025 for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
1026 LASSERT(info->clt_counters[i].ctc_nr_held == 0);
1027 LASSERT(info->clt_counters[i].ctc_nr_used == 0);
1028 LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
1029 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
1030 lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
1031 lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
1035 static struct lu_context_key cl_key = {
1036 .lct_tags = LCT_CL_THREAD,
1037 .lct_init = cl_key_init,
1038 .lct_fini = cl_key_fini,
1039 .lct_exit = cl_key_exit
1042 static struct lu_kmem_descr cl_object_caches[] = {
1044 .ckd_cache = &cl_env_kmem,
1045 .ckd_name = "cl_env_kmem",
1046 .ckd_size = sizeof (struct cl_env)
1054 * Global initialization of cl-data. Create kmem caches, register
1055 * lu_context_key's, etc.
1057 * \see cl_global_fini()
1059 int cl_global_init(void)
1063 result = lu_kmem_init(cl_object_caches);
1065 LU_CONTEXT_KEY_INIT(&cl_key);
1066 result = lu_context_key_register(&cl_key);
1068 result = cl_lock_init();
1070 result = cl_page_init();
1077 * Finalization of global cl-data. Dual to cl_global_init().
1079 void cl_global_fini(void)
1083 lu_context_key_degister(&cl_key);
1084 lu_kmem_fini(cl_object_caches);