/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
/* class_put_type() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
#include <cl_object.h>
#include <lu_object.h>
#include "cl_internal.h"

static struct kmem_cache *cl_env_kmem;

/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;
/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
	int result;

	result = lu_object_header_init(&h->coh_lu);
	if (result == 0) {
		spin_lock_init(&h->coh_attr_guard);
		lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
		h->coh_page_bufsize = 0;
	}
	return result;
}
EXPORT_SYMBOL(cl_object_header_init);
/**
 * Finalize cl_object_header.
 */
void cl_object_header_fini(struct cl_object_header *h)
{
	lu_object_header_fini(&h->coh_lu);
}
/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either a cached or a newly created object. An additional reference
 * on the returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
				 struct cl_device *cd, const struct lu_fid *fid,
				 const struct cl_object_conf *c)
{
	might_sleep();
	return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
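
/*
 * Usage sketch (illustrative only, not part of the original code): a typical
 * caller looks up an object by FID and balances the lookup with
 * cl_object_put(). "env", "cd", "fid" and "conf" are assumed to come from
 * the caller's context.
 *
 *	struct cl_object *obj;
 *
 *	obj = cl_object_find(env, cd, fid, &conf);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...use obj...
 *	cl_object_put(env, obj);
 */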
/**
 * Releases a reference on \a o.
 *
 * When the last reference is released the object is returned to the cache,
 * unless the lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in
 * its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
	lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);
/**
 * Acquire an additional reference to the object \a o.
 *
 * This can only be used to acquire an _additional_ reference, i.e., the
 * caller already has to possess at least one reference to \a o before
 * calling this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
	lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);
/**
 * Returns the top-object for a given \a o.
 *
 * \see cl_io_top()
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
	struct cl_object_header *hdr = cl_object_header(o);
	struct cl_object *top;

	while (hdr->coh_parent != NULL)
		hdr = hdr->coh_parent;

	top = lu2cl(lu_object_top(&hdr->coh_lu));
	CDEBUG(D_TRACE, "%p -> %p\n", o, top);
	return top;
}
EXPORT_SYMBOL(cl_object_top);
/**
 * Returns a pointer to the lock protecting data-attributes for the given
 * object \a o.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
	return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}
/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing, until the lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get(), cl_object_attr_update().
 */
void cl_object_attr_lock(struct cl_object *o)
	__acquires(cl_object_attr_guard(o))
{
	spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);
/**
 * Releases the data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
	__releases(cl_object_attr_guard(o))
{
	spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in the parts of \a attr that this layer is
 * responsible for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
		       struct cl_attr *attr)
{
	struct lu_object_header *top;
	int result = 0;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_get != NULL) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_get);
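
/*
 * Usage sketch (illustrative only): data-attributes must be accessed under
 * coh_attr_guard, so callers bracket the accessors with
 * cl_object_attr_lock()/cl_object_attr_unlock(). "env", "obj", "attr" and
 * "rc" are assumed to exist in the caller.
 *
 *	cl_object_attr_lock(obj);
 *	rc = cl_object_attr_get(env, obj, attr);
 *	cl_object_attr_unlock(obj);
 */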
/**
 * Updates data-attributes of an object \a obj.
 *
 * Only attributes mentioned in the validity bit-mask \a v are
 * updated. Calls cl_object_operations::coo_attr_update() on every layer,
 * bottom to top.
 */
int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
			  const struct cl_attr *attr, unsigned v)
{
	struct lu_object_header *top;
	int result = 0;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_update != NULL) {
			result = obj->co_ops->coo_attr_update(env, obj, attr,
							      v);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_update);
/**
 * Notifies layers (bottom-to-top) that a glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to the glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
		      struct ost_lvb *lvb)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_glimpse != NULL) {
			result = obj->co_ops->coo_glimpse(env, obj, lvb);
			if (result != 0)
				break;
		}
	}
	LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
			 "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
			 "ctime: "LPU64" blocks: "LPU64"\n",
			 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
			 lvb->lvb_ctime, lvb->lvb_blocks);
	return result;
}
EXPORT_SYMBOL(cl_object_glimpse);
/**
 * Updates the configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
		const struct cl_object_conf *conf)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_conf_set != NULL) {
			result = obj->co_ops->coo_conf_set(env, obj, conf);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_conf_set);
/**
 * Prunes caches of pages and locks for this object.
 */
int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *top;
	struct cl_object *o;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
		if (o->co_ops->coo_prune != NULL) {
			result = o->co_ops->coo_prune(env, o);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_prune);
/**
 * Get stripe information of this object.
 */
int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
			struct lov_user_md __user *uarg)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_getstripe != NULL) {
			result = obj->co_ops->coo_getstripe(env, obj, uarg);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_getstripe);
/**
 * Find whether there is any callback data (ldlm lock) attached upon this
 * object.
 */
int cl_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
			  ldlm_iterator_t iter, void *data)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_find_cbdata != NULL) {
			result = obj->co_ops->coo_find_cbdata(env, obj, iter,
							      data);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_find_cbdata);
/**
 * Get fiemap extents from the file object.
 *
 * \param env	[in]	lustre environment
 * \param obj	[in]	file object
 * \param key	[in]	fiemap request argument
 * \param fiemap	[out]	fiemap extents mapping retrieved
 * \param buflen	[in]	max buffer length of @fiemap
 *
 * \retval 0	success
 * \retval < 0	error
 */
int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
		     struct ll_fiemap_info_key *key,
		     struct fiemap *fiemap, size_t *buflen)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_fiemap != NULL) {
			result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
							 buflen);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_fiemap);
int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
			 struct cl_layout *cl)
{
	struct lu_object_header *top = obj->co_lu.lo_header;

	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_layout_get != NULL)
			return obj->co_ops->coo_layout_get(env, obj, cl);
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(cl_object_layout_get);
loff_t cl_object_maxbytes(struct cl_object *obj)
{
	struct lu_object_header *top = obj->co_lu.lo_header;
	loff_t maxbytes = LLONG_MAX;

	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_maxbytes != NULL)
			maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
					 maxbytes);
	}

	return maxbytes;
}
EXPORT_SYMBOL(cl_object_maxbytes);
/**
 * Helper function removing all object locks, and marking the object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
	struct cl_object_header *hdr = cl_object_header(obj);

	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
}
EXPORT_SYMBOL(cl_object_kill);
void cache_stats_init(struct cache_stats *cs, const char *name)
{
	int i;

	cs->cs_name = name;
	for (i = 0; i < CS_NR; i++)
		atomic_set(&cs->cs_stats[i], 0);
}
static int cache_stats_print(const struct cache_stats *cs,
			     struct seq_file *m, int h)
{
	int i;

	/*
	 *   lookup    hit  total cached create
	 * env: ...... ...... ...... ...... ......
	 */
	if (h) {
		const char *names[CS_NR] = CS_NAMES;

		seq_printf(m, "%6s", " ");
		for (i = 0; i < CS_NR; i++)
			seq_printf(m, "%8s", names[i]);
		seq_printf(m, "\n");
	}

	seq_printf(m, "%5.5s:", cs->cs_name);
	for (i = 0; i < CS_NR; i++)
		seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
	return 0;
}
static void cl_env_percpu_refill(void);
/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
	size_t i;
	int result;

	result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
	if (result == 0) {
		cache_stats_init(&s->cs_pages, "pages");
		for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
			atomic_set(&s->cs_pages_state[i], 0);
		cl_env_percpu_refill();
	}
	return result;
}
EXPORT_SYMBOL(cl_site_init);
/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
	lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);
static struct cache_stats cl_env_stats = {
	.cs_name  = "envs",
	.cs_stats = { ATOMIC_INIT(0), }
};
/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
	static const char *pstate[] = {
		[CPS_CACHED]  = "c",
		[CPS_OWNED]   = "o",
		[CPS_PAGEOUT] = "w",
		[CPS_PAGEIN]  = "r",
		[CPS_FREEING] = "f"
	};
	size_t i;

	/*
	 *        lookup    hit  total   busy create
	 * pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
	 * locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
	 *  env:  ...... ...... ...... ...... ......
	 */
	lu_site_stats_seq_print(&site->cs_lu, m);
	cache_stats_print(&site->cs_pages, m, 1);
	seq_printf(m, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
		seq_printf(m, "%s: %u ", pstate[i],
			   atomic_read(&site->cs_pages_state[i]));
	seq_printf(m, "]\n");
	cache_stats_print(&cl_env_stats, m, 0);
	seq_printf(m, "\n");
	return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);
/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store the cl_env pointer in task specific
 * structures. On Linux, it won't be easy to use task_struct->journal_info
 * because Lustre code may call into other file systems, which have certain
 * assumptions about journal_info. Currently the following fields in
 * task_struct have been identified as usable for this purpose:
 *  - cl_env: for liblustre.
 *  - tux_info: only on RedHat kernels.
 *
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into the other part of the kernel
 * which will use those fields in task_struct without explicitly exiting
 * Lustre.
 *
 * If no space in task_struct is available, a hash table will be used.
 */
static struct list_head cl_envs;
static unsigned cl_envs_cached_nr  = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
					   * on the number of cached envs */
static DEFINE_SPINLOCK(cl_envs_guard);
struct cl_env {
	void             *ce_magic;
	struct lu_env     ce_lu;
	struct lu_context ce_ses;

#ifdef LL_TASK_CL_ENV
	void             *ce_prev;
#else
	/**
	 * This allows cl_env to be entered into cl_env_hash which implements
	 * the current thread -> client environment lookup.
	 */
	struct hlist_node ce_node;
#endif
	/**
	 * Owner for the current cl_env.
	 *
	 * If LL_TASK_CL_ENV is defined, this points to the owning current,
	 * only for debugging purposes;
	 * otherwise hash is used, and this is the key for cfs_hash.
	 * Now the current thread pid is stored. Note that using a thread
	 * pointer would lead to an unbalanced hash because of its specific
	 * allocation locality, and it can vary across platforms, OSes and
	 * even kernel versions.
	 */
	void             *ce_owner;

	/*
	 * Linkage into global list of all client environments. Used for
	 * garbage collection.
	 */
	struct list_head  ce_linkage;
	/*
	 * Reference counter.
	 */
	int               ce_ref;
	/*
	 * Debugging field: address of the caller who made original
	 * allocation.
	 */
	void             *ce_debug;
};
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.cs_stats[CS_##counter])

#define CL_ENV_DEC(counter) do { \
	LASSERT(atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
	atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
} while (0)
#else
#define CL_ENV_INC(counter)
#define CL_ENV_DEC(counter)
#endif
static void cl_env_init0(struct cl_env *cle, void *debug)
{
	LASSERT(cle->ce_ref == 0);
	LASSERT(cle->ce_magic == &cl_env_init0);
	LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);

	cle->ce_ref = 1;
	cle->ce_debug = debug;
	CL_ENV_INC(busy);
}
#ifndef LL_TASK_CL_ENV
/*
 * The implementation that uses a hash table to connect cl_env and thread.
 */

static struct cfs_hash *cl_env_hash;

static unsigned cl_env_hops_hash(struct cfs_hash *lh,
				 const void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
	return cfs_hash_u64_hash((__u64)key, mask);
#else
	return cfs_hash_u32_hash((__u32)key, mask);
#endif
}
static void *cl_env_hops_obj(struct hlist_node *hn)
{
	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

	LASSERT(cle->ce_magic == &cl_env_init0);
	return (void *)cle;
}

static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
	struct cl_env *cle = cl_env_hops_obj(hn);

	LASSERT(cle->ce_owner != NULL);
	return (key == cle->ce_owner);
}

static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
{
	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

	LASSERT(cle->ce_magic == &cl_env_init0);
}
static struct cfs_hash_ops cl_env_hops = {
	.hs_hash       = cl_env_hops_hash,
	.hs_key        = cl_env_hops_obj,
	.hs_keycmp     = cl_env_hops_keycmp,
	.hs_object     = cl_env_hops_obj,
	.hs_get        = cl_env_hops_noop,
	.hs_put_locked = cl_env_hops_noop,
};
static inline struct cl_env *cl_env_fetch(void)
{
	struct cl_env *cle;

	cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
	return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
	if (cle) {
		int rc;

		LASSERT(cle->ce_owner == NULL);
		cle->ce_owner = (void *)(long)current->pid;
		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
					 &cle->ce_node);
		LASSERT(rc == 0);
	}
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
	void *cookie;

	LASSERT(cle->ce_owner == (void *)(long)current->pid);
	cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
			      &cle->ce_node);
	LASSERT(cookie == cle);
	cle->ce_owner = NULL;
}
static int cl_env_store_init(void) {
	cl_env_hash = cfs_hash_create("cl_env",
				      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
				      HASH_CL_ENV_BKT_BITS, 0,
				      CFS_HASH_MIN_THETA,
				      CFS_HASH_MAX_THETA,
				      &cl_env_hops,
				      CFS_HASH_RW_BKTLOCK);
	return cl_env_hash != NULL ? 0 : -ENOMEM;
}

static void cl_env_store_fini(void) {
	cfs_hash_putref(cl_env_hash);
}
#else /* LL_TASK_CL_ENV */
/*
 * The implementation that stores cl_env directly in the thread structure.
 */

static inline struct cl_env *cl_env_fetch(void)
{
	struct cl_env *cle;

	cle = current->LL_TASK_CL_ENV;
	if (cle && cle->ce_magic != &cl_env_init0)
		cle = NULL;
	return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
	if (cle) {
		LASSERT(cle->ce_owner == NULL);
		cle->ce_owner = current;
		cle->ce_prev = current->LL_TASK_CL_ENV;
		current->LL_TASK_CL_ENV = cle;
	}
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
	LASSERT(cle->ce_owner == current);
	LASSERT(current->LL_TASK_CL_ENV == cle);
	current->LL_TASK_CL_ENV = cle->ce_prev;
	cle->ce_owner = NULL;
}

static int cl_env_store_init(void) { return 0; }
static void cl_env_store_fini(void) { }

#endif /* LL_TASK_CL_ENV */
static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
	if (cle == NULL)
		cle = cl_env_fetch();

	if (cle && cle->ce_owner)
		cl_env_do_detach(cle);

	return cle;
}
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
	struct lu_env *env;
	struct cl_env *cle;

	OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
	if (cle != NULL) {
		int rc;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		env = &cle->ce_lu;
		rc = lu_env_init(env, LCT_CL_THREAD | ctx_tags);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses,
					     LCT_SESSION | ses_tags);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
				cl_env_init0(cle, debug);
			} else
				lu_env_fini(env);
		}
		if (rc != 0) {
			OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
			env = ERR_PTR(rc);
		} else {
			CL_ENV_INC(create);
			CL_ENV_INC(total);
		}
	} else
		env = ERR_PTR(-ENOMEM);
	return env;
}
static void cl_env_fini(struct cl_env *cle)
{
	CL_ENV_DEC(total);
	lu_context_fini(&cle->ce_lu.le_ctx);
	lu_context_fini(&cle->ce_ses);
	OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
}
static struct lu_env *cl_env_obtain(void *debug)
{
	struct cl_env *cle;
	struct lu_env *env;

	spin_lock(&cl_envs_guard);
	LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
	if (cl_envs_cached_nr > 0) {
		int rc;

		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
		list_del_init(&cle->ce_linkage);
		cl_envs_cached_nr--;
		spin_unlock(&cl_envs_guard);

		env = &cle->ce_lu;
		rc = lu_env_refill(env);
		if (rc == 0) {
			cl_env_init0(cle, debug);
			lu_context_enter(&env->le_ctx);
			lu_context_enter(&cle->ce_ses);
		} else {
			cl_env_fini(cle);
			env = ERR_PTR(rc);
		}
	} else {
		spin_unlock(&cl_envs_guard);
		env = cl_env_new(lu_context_tags_default,
				 lu_session_tags_default, debug);
	}
	return env;
}
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
	return container_of(env, struct cl_env, ce_lu);
}
struct lu_env *cl_env_peek(int *refcheck)
{
	struct lu_env *env;
	struct cl_env *cle;

	CL_ENV_INC(lookup);

	/* check that we don't go far from untrusted pointer */
	CLASSERT(offsetof(struct cl_env, ce_magic) == 0);

	env = NULL;
	cle = cl_env_fetch();
	if (cle != NULL) {
		CL_ENV_INC(hit);
		env = &cle->ce_lu;
		*refcheck = ++cle->ce_ref;
	}
	CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
	return env;
}
/**
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned, otherwise a new environment is allocated.
 *
 * Allocations are amortized through the global cache of environments.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 *
 * \see cl_env_peek()
 */
struct lu_env *cl_env_get(int *refcheck)
{
	struct lu_env *env;

	env = cl_env_peek(refcheck);
	if (env == NULL) {
		env = cl_env_obtain(__builtin_return_address(0));
		if (!IS_ERR(env)) {
			struct cl_env *cle;

			cle = cl_env_container(env);
			cl_env_attach(cle);
			*refcheck = cle->ce_ref;
			CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
		}
	}
	return env;
}
EXPORT_SYMBOL(cl_env_get);
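
/*
 * Usage sketch (illustrative only): the same "refcheck" integer is passed to
 * cl_env_get() and the matching cl_env_put() so that a missed put can be
 * detected.
 *
 *	int refcheck;
 *	struct lu_env *env;
 *
 *	env = cl_env_get(&refcheck);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	...use env...
 *	cl_env_put(env, &refcheck);
 */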
/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
	struct lu_env *env;

	LASSERT(cl_env_peek(refcheck) == NULL);
	env = cl_env_new(tags, tags, __builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		cl_env_attach(cle);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_alloc);
static void cl_env_exit(struct cl_env *cle)
{
	LASSERT(cle->ce_owner == NULL);
	lu_context_exit(&cle->ce_lu.le_ctx);
	lu_context_exit(&cle->ce_ses);
}
/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned cl_env_cache_purge(unsigned nr)
{
	struct cl_env *cle;

	spin_lock(&cl_envs_guard);
	for (; !list_empty(&cl_envs) && nr > 0; --nr) {
		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
		list_del_init(&cle->ce_linkage);
		LASSERT(cl_envs_cached_nr > 0);
		cl_envs_cached_nr--;
		spin_unlock(&cl_envs_guard);

		cl_env_fini(cle);
		spin_lock(&cl_envs_guard);
	}
	LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
	spin_unlock(&cl_envs_guard);
	return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);
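
/*
 * Illustrative note: a caller that wants the whole cache dropped, e.g. on
 * module cleanup, can pass ~0 as \a nr:
 *
 *	cl_env_cache_purge(~0);
 */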
/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When the counter drops to 0, nothing
 * in this thread is using the environment and it is returned to the
 * allocation cache, or freed immediately if the cache is full.
 */
void cl_env_put(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle;

	cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);
	LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	if (--cle->ce_ref == 0) {
		CL_ENV_DEC(busy);
		cl_env_detach(cle);
		cle->ce_debug = NULL;
		cl_env_exit(cle);
		/*
		 * Don't bother to take a lock here.
		 *
		 * Return environment to the cache only when it was allocated
		 * with the standard tags.
		 */
		if (cl_envs_cached_nr < cl_envs_cached_max &&
		    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
		    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
			spin_lock(&cl_envs_guard);
			list_add(&cle->ce_linkage, &cl_envs);
			cl_envs_cached_nr++;
			spin_unlock(&cl_envs_guard);
		} else
			cl_env_fini(cle);
	}
}
EXPORT_SYMBOL(cl_env_put);
/**
 * Declares a point of re-entrancy.
 *
 * \see cl_env_reexit()
 */
void *cl_env_reenter(void)
{
	return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);

/**
 * Exits re-entrancy.
 */
void cl_env_reexit(void *cookie)
{
	cl_env_detach(NULL);
	cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);
/**
 * Sets up the user-supplied \a env as the current environment. This is to be
 * used to guarantee that an environment exists even when cl_env_get() fails.
 * It is up to the user to ensure proper concurrency control.
 *
 * \see cl_env_unplant()
 */
void cl_env_implant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);

	cl_env_attach(cle);
	cl_env_get(refcheck);
	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);
/**
 * Detach environment installed earlier by cl_env_implant().
 */
void cl_env_unplant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 1);

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);

	cl_env_detach(cle);
	cl_env_put(env, refcheck);
}
EXPORT_SYMBOL(cl_env_unplant);
struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
{
	struct lu_env *env;

	nest->cen_cookie = NULL;
	env = cl_env_peek(&nest->cen_refcheck);
	if (env != NULL) {
		if (!cl_io_is_going(env))
			return env;
		cl_env_put(env, &nest->cen_refcheck);
		nest->cen_cookie = cl_env_reenter();
	}
	env = cl_env_get(&nest->cen_refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(nest->cen_cookie);
		return env;
	}

	LASSERT(!cl_io_is_going(env));
	return env;
}
EXPORT_SYMBOL(cl_env_nested_get);

void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
{
	cl_env_put(env, &nest->cen_refcheck);
	cl_env_reexit(nest->cen_cookie);
}
EXPORT_SYMBOL(cl_env_nested_put);
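
/*
 * Usage sketch (illustrative only): code that may already be running inside
 * an active IO environment obtains a nested environment and releases it with
 * cl_env_nested_put(), which restores the previous environment.
 *
 *	struct cl_env_nest nest;
 *	struct lu_env *env;
 *
 *	env = cl_env_nested_get(&nest);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	...use env...
 *	cl_env_nested_put(&nest, env);
 */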
/**
 * Converts struct cl_attr to struct ost_lvb.
 *
 * \see cl_lvb2attr
 */
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
	lvb->lvb_size   = attr->cat_size;
	lvb->lvb_mtime  = attr->cat_mtime;
	lvb->lvb_atime  = attr->cat_atime;
	lvb->lvb_ctime  = attr->cat_ctime;
	lvb->lvb_blocks = attr->cat_blocks;
}
EXPORT_SYMBOL(cl_attr2lvb);
/**
 * Converts struct ost_lvb to struct cl_attr.
 *
 * \see cl_attr2lvb
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
	attr->cat_size   = lvb->lvb_size;
	attr->cat_mtime  = lvb->lvb_mtime;
	attr->cat_atime  = lvb->lvb_atime;
	attr->cat_ctime  = lvb->lvb_ctime;
	attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);
static struct cl_env cl_env_percpu[NR_CPUS];

static int cl_env_percpu_init(void)
{
	struct cl_env *cle;
	int tags = LCT_REMEMBER | LCT_NOREF;
	int i, j;
	int rc = 0;

	for_each_possible_cpu(i) {
		struct lu_env *env;

		cle = &cl_env_percpu[i];
		env = &cle->ce_lu;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		rc = lu_env_init(env, LCT_CL_THREAD | tags);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
			} else
				lu_env_fini(env);
		}
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* Indices 0 to i (excluding i) were correctly initialized,
		 * thus we must uninitialize up to i, the rest are undefined. */
		for (j = 0; j < i; j++) {
			cle = &cl_env_percpu[j];
			lu_context_exit(&cle->ce_ses);
			lu_context_fini(&cle->ce_ses);
			lu_env_fini(&cle->ce_lu);
		}
	}

	return rc;
}
static void cl_env_percpu_fini(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cl_env *cle = &cl_env_percpu[i];

		lu_context_exit(&cle->ce_ses);
		lu_context_fini(&cle->ce_ses);
		lu_env_fini(&cle->ce_lu);
	}
}
static void cl_env_percpu_refill(void)
{
	int i;

	for_each_possible_cpu(i)
		lu_env_refill(&cl_env_percpu[i].ce_lu);
}
void cl_env_percpu_put(struct lu_env *env)
{
	struct cl_env *cle;
	int cpu;

	cpu = smp_processor_id();
	cle = cl_env_container(env);
	LASSERT(cle == &cl_env_percpu[cpu]);

	cle->ce_ref--;
	LASSERT(cle->ce_ref == 0);

	CL_ENV_DEC(busy);
	cl_env_detach(cle);
	cle->ce_debug = NULL;

	put_cpu();
}
EXPORT_SYMBOL(cl_env_percpu_put);
struct lu_env *cl_env_percpu_get()
{
	struct cl_env *cle;

	cle = &cl_env_percpu[get_cpu()];
	cl_env_init0(cle, __builtin_return_address(0));

	cl_env_attach(cle);
	return &cle->ce_lu;
}
EXPORT_SYMBOL(cl_env_percpu_get);
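
/*
 * Usage sketch (illustrative only): cl_env_percpu_get() pins the caller to a
 * CPU via get_cpu(), so the environment must be released with
 * cl_env_percpu_put() (which calls put_cpu()) before the caller may sleep.
 *
 *	struct lu_env *env = cl_env_percpu_get();
 *
 *	...short, non-blocking use of env...
 *	cl_env_percpu_put(env);
 */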
/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
				struct lu_device_type *ldt,
				struct lu_device *next)
{
	const char       *typename;
	struct lu_device *d;

	LASSERT(ldt != NULL);

	typename = ldt->ldt_name;
	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
	if (!IS_ERR(d)) {
		int rc;

		if (site != NULL)
			d->ld_site = site;
		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
		if (rc == 0) {
			lu_device_get(d);
			lu_ref_add(&d->ld_reference,
				   "lu-stack", &lu_site_init);
		} else {
			ldt->ldt_ops->ldto_device_free(env, d);
			CERROR("can't init device '%s', %d\n", typename, rc);
			d = ERR_PTR(rc);
		}
	} else
		CERROR("Cannot allocate device: '%s'\n", typename);
	return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
	lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);
static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
	return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl0_key_{init,fini}() */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
static void *cl_key_init(const struct lu_context *ctx,
			 struct lu_context_key *key)
{
	struct cl_thread_info *info;

	info = cl0_key_init(ctx, key);
	if (!IS_ERR(info)) {
		size_t i;

		for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
			lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
	return info;
}
static void cl_key_fini(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info;
	size_t i;

	info = data;
	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
	cl0_key_fini(ctx, key, data);
}
static void cl_key_exit(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info = data;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
		LASSERT(info->clt_counters[i].ctc_nr_held == 0);
		LASSERT(info->clt_counters[i].ctc_nr_used == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
		lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
}
static struct lu_context_key cl_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = cl_key_init,
	.lct_fini = cl_key_fini,
	.lct_exit = cl_key_exit
};
static struct lu_kmem_descr cl_object_caches[] = {
	{
		.ckd_cache = &cl_env_kmem,
		.ckd_name  = "cl_env_kmem",
		.ckd_size  = sizeof(struct cl_env)
	},
	{
		.ckd_cache = NULL
	}
};
/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
	int result;

	INIT_LIST_HEAD(&cl_envs);

	result = cl_env_store_init();
	if (result)
		return result;

	result = lu_kmem_init(cl_object_caches);
	if (result)
		goto out_store;

	LU_CONTEXT_KEY_INIT(&cl_key);
	result = lu_context_key_register(&cl_key);
	if (result)
		goto out_kmem;

	result = cl_env_percpu_init();
	if (result) /* no cl_env_percpu_fini on error */
		goto out_keys;

	return 0;

out_keys:
	lu_context_key_degister(&cl_key);
out_kmem:
	lu_kmem_fini(cl_object_caches);
out_store:
	cl_env_store_fini();
	return result;
}
/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
	cl_env_percpu_fini();
	lu_context_key_degister(&cl_key);
	lu_kmem_fini(cl_object_caches);
	cl_env_store_fini();
}