/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
/* class_put_type() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
#include <cl_object.h>
#include <lu_object.h>
#include "cl_internal.h"

static struct kmem_cache *cl_env_kmem;

/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;

/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
        int result;

        result = lu_object_header_init(&h->coh_lu);
        if (result == 0) {
                spin_lock_init(&h->coh_attr_guard);
                lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
                h->coh_page_bufsize = 0;
        }
        return result;
}
EXPORT_SYMBOL(cl_object_header_init);

/**
 * Finalize cl_object_header.
 */
void cl_object_header_fini(struct cl_object_header *h)
{
        lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);

/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either cached or newly created object. Additional reference on the
 * returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
                                 struct cl_device *cd, const struct lu_fid *fid,
                                 const struct cl_object_conf *c)
{
        return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);

/**
 * Releases a reference on \a o.
 *
 * When the last reference is released, the object is returned to the cache,
 * unless the lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in
 * its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
        lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);

/**
 * Acquires an additional reference to the object \a o.
 *
 * This can only be used to acquire an _additional_ reference, i.e., the
 * caller must already possess at least one reference to \a o before calling
 * this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
        lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);

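/*
 * A minimal usage sketch of the reference protocol above (the "dev", "fid"
 * and "conf" variables are illustrative, not part of this file):
 * cl_object_find() returns the object with one reference held,
 * cl_object_get() takes an additional one, and each reference is dropped
 * with cl_object_put():
 *
 *      struct cl_object *obj;
 *
 *      obj = cl_object_find(env, dev, fid, conf);
 *      if (!IS_ERR(obj)) {
 *              cl_object_get(obj);             // second reference
 *              ...
 *              cl_object_put(env, obj);        // drop the extra reference
 *              cl_object_put(env, obj);        // drop the lookup reference
 *      }
 */
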
/**
 * Returns the top-object for a given \a o.
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
        struct cl_object_header *hdr = cl_object_header(o);
        struct cl_object *top;

        while (hdr->coh_parent != NULL)
                hdr = hdr->coh_parent;

        top = lu2cl(lu_object_top(&hdr->coh_lu));
        CDEBUG(D_TRACE, "%p -> %p\n", o, top);
        return top;
}
EXPORT_SYMBOL(cl_object_top);

/**
 * Returns pointer to the lock protecting data-attributes for the given
 * object.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
        return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}

/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing until the lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get() and cl_object_attr_update().
 */
void cl_object_attr_lock(struct cl_object *o)
        __acquires(cl_object_attr_guard(o))
{
        spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);

/**
 * Releases data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
        __releases(cl_object_attr_guard(o))
{
        spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);

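/*
 * A minimal sketch of the attribute locking protocol (hypothetical caller;
 * CAT_SIZE is assumed here to be the validity bit for cat_size, see the
 * cl_attr definitions in cl_object.h): the guard must be held across both
 * cl_object_attr_get() and cl_object_attr_update():
 *
 *      cl_object_attr_lock(obj);
 *      rc = cl_object_attr_get(env, obj, attr);
 *      if (rc == 0) {
 *              attr->cat_size = new_size;
 *              rc = cl_object_attr_update(env, obj, attr, CAT_SIZE);
 *      }
 *      cl_object_attr_unlock(obj);
 */
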
/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in the parts of \a attr that this layer is
 * responsible for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
                       struct cl_attr *attr)
{
        struct lu_object_header *top;
        int result = 0;

        assert_spin_locked(cl_object_attr_guard(obj));

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_attr_get != NULL) {
                        result = obj->co_ops->coo_attr_get(env, obj, attr);
                        if (result != 0) {
                                if (result > 0)
                                        result = 0;
                                break;
                        }
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_attr_get);

/**
 * Updates data-attributes of an object \a obj.
 *
 * Only attributes mentioned in the validity bit-mask \a v are
 * updated. Calls cl_object_operations::coo_attr_update() on every layer,
 * bottom to top.
 */
int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
                          const struct cl_attr *attr, unsigned v)
{
        struct lu_object_header *top;
        int result = 0;

        assert_spin_locked(cl_object_attr_guard(obj));

        top = obj->co_lu.lo_header;
        list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_attr_update != NULL) {
                        result = obj->co_ops->coo_attr_update(env, obj, attr,
                                                              v);
                        if (result != 0) {
                                if (result > 0)
                                        result = 0;
                                break;
                        }
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_attr_update);

/**
 * Notifies layers (bottom-to-top) that a glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to the glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
                      struct ost_lvb *lvb)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_glimpse != NULL) {
                        result = obj->co_ops->coo_glimpse(env, obj, lvb);
                        if (result != 0)
                                break;
                }
        }
        LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
                         "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
                         "ctime: "LPU64" blocks: "LPU64"\n",
                         lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
                         lvb->lvb_ctime, lvb->lvb_blocks);
        return result;
}
EXPORT_SYMBOL(cl_object_glimpse);

/**
 * Updates the configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
                const struct cl_object_conf *conf)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_conf_set != NULL) {
                        result = obj->co_ops->coo_conf_set(env, obj, conf);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_conf_set);

/**
 * Prunes caches of pages and locks for this object.
 */
int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
        struct lu_object_header *top;
        struct cl_object *o;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
                if (o->co_ops->coo_prune != NULL) {
                        result = o->co_ops->coo_prune(env, o);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_prune);

/**
 * Gets stripe information of this object.
 */
int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
                        struct lov_user_md __user *uarg)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_getstripe != NULL) {
                        result = obj->co_ops->coo_getstripe(env, obj, uarg);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_getstripe);

/**
 * Finds whether there is any callback data (ldlm lock) attached to this
 * object.
 */
int cl_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
                          ldlm_iterator_t iter, void *data)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_find_cbdata != NULL) {
                        result = obj->co_ops->coo_find_cbdata(env, obj, iter,
                                                              data);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_find_cbdata);

/**
 * Gets fiemap extents from a file object.
 *
 * \param env    [in]  lustre environment
 * \param obj    [in]  file object
 * \param key    [in]  fiemap request argument
 * \param fiemap [out] fiemap extents mapping retrieved
 * \param buflen [in]  max buffer length of \a fiemap
 */
int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
                     struct ll_fiemap_info_key *key,
                     struct fiemap *fiemap, size_t *buflen)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_fiemap != NULL) {
                        result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
                                                         buflen);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_fiemap);

int cl_object_obd_info_get(const struct lu_env *env, struct cl_object *obj,
                           struct obd_info *oinfo,
                           struct ptlrpc_request_set *set)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_obd_info_get != NULL) {
                        result = obj->co_ops->coo_obd_info_get(env, obj, oinfo,
                                                               set);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_obd_info_get);

int cl_object_data_version(const struct lu_env *env, struct cl_object *obj,
                           __u64 *data_version, int flags)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_data_version != NULL) {
                        result = obj->co_ops->coo_data_version(env, obj,
                                                               data_version,
                                                               flags);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_data_version);

/**
 * Helper function removing all object locks, and marking the object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
        struct cl_object_header *hdr = cl_object_header(obj);

        set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
}
EXPORT_SYMBOL(cl_object_kill);

void cache_stats_init(struct cache_stats *cs, const char *name)
{
        int i;

        cs->cs_name = name;
        for (i = 0; i < CS_NR; i++)
                atomic_set(&cs->cs_stats[i], 0);
}

static int cache_stats_print(const struct cache_stats *cs,
                             struct seq_file *m, int h)
{
        int i;

        /*
         *   lookup    hit  total cached create
         * env: ...... ...... ...... ...... ......
         */
        if (h) {
                const char *names[CS_NR] = CS_NAMES;

                seq_printf(m, "%6s", " ");
                for (i = 0; i < CS_NR; i++)
                        seq_printf(m, "%8s", names[i]);
                seq_printf(m, "\n");
        }

        seq_printf(m, "%5.5s:", cs->cs_name);
        for (i = 0; i < CS_NR; i++)
                seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
        return 0;
}

static void cl_env_percpu_refill(void);

/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
        size_t i;
        int result;

        result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
        if (result == 0) {
                cache_stats_init(&s->cs_pages, "pages");
                for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
                        atomic_set(&s->cs_pages_state[i], 0);
                cl_env_percpu_refill();
        }
        return result;
}
EXPORT_SYMBOL(cl_site_init);

/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
        lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);

static struct cache_stats cl_env_stats = {
        .cs_name  = "envs",
        .cs_stats = { ATOMIC_INIT(0), }
};

/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
        static const char *pstate[] = {
                [CPS_CACHED]  = "c",
                [CPS_OWNED]   = "o",
                [CPS_PAGEOUT] = "w",
                [CPS_PAGEIN]  = "r",
                [CPS_FREEING] = "f"
        };
        size_t i;

        /*
         *   lookup    hit  total   busy create
         * pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
         * locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
         * env: ...... ...... ...... ...... ......
         */
        lu_site_stats_seq_print(&site->cs_lu, m);
        cache_stats_print(&site->cs_pages, m, 1);
        seq_printf(m, " [");
        for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
                seq_printf(m, "%s: %u ", pstate[i],
                           atomic_read(&site->cs_pages_state[i]));
        seq_printf(m, "]\n");
        cache_stats_print(&cl_env_stats, m, 0);
        seq_printf(m, "\n");
        return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);

/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store the cl_env pointer in task-specific
 * structures. On Linux, it won't be easy to use task_struct->journal_info,
 * because Lustre code may call into other file systems that have their own
 * assumptions about journal_info. The following task_struct fields were
 * identified as candidates for this purpose:
 *  - cl_env: only for liblustre.
 *  - tux_info: only on RedHat kernels.
 *
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into the other parts of the kernel
 * that use those fields in task_struct without explicitly exiting Lustre.
 *
 * If no space in task_struct is available, a hash table is used instead.
 */

static struct list_head cl_envs;
static unsigned cl_envs_cached_nr  = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
                                           * on the number of cached envs. */
static DEFINE_SPINLOCK(cl_envs_guard);

struct cl_env {
        void             *ce_magic;
        struct lu_env     ce_lu;
        struct lu_context ce_ses;

#ifdef LL_TASK_CL_ENV
        struct cl_env    *ce_prev;
#else
        /**
         * This allows cl_env to be entered into cl_env_hash which implements
         * the current thread -> client environment lookup.
         */
        struct hlist_node ce_node;
#endif
        /**
         * Owner for the current cl_env.
         *
         * If LL_TASK_CL_ENV is defined, this points to the owning current,
         * only for debugging purposes;
         * otherwise a hash is used, and this is the key for cfs_hash.
         * Currently the thread pid is stored. Note that using the thread
         * pointer would lead to an unbalanced hash because of its specific
         * allocation locality, which can vary across platforms and OSes,
         * even different kernel versions.
         */
        void             *ce_owner;

        /*
         * Linkage into global list of all client environments. Used for
         * garbage collection.
         */
        struct list_head  ce_linkage;
        int               ce_ref;
        /*
         * Debugging field: address of the caller who made the original
         * allocation.
         */
        void             *ce_debug;
};

#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.cs_stats[CS_##counter])

#define CL_ENV_DEC(counter) do { \
        LASSERT(atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
        atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
} while (0)
#else
#define CL_ENV_INC(counter)
#define CL_ENV_DEC(counter)
#endif

static void cl_env_init0(struct cl_env *cle, void *debug)
{
        LASSERT(cle->ce_ref == 0);
        LASSERT(cle->ce_magic == &cl_env_init0);
        LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);

        cle->ce_ref = 1;
        cle->ce_debug = debug;
        CL_ENV_INC(busy);
}

#ifndef LL_TASK_CL_ENV
/*
 * An implementation that uses a hash table to connect cl_env and the thread
 * struct.
 */
static cfs_hash_t *cl_env_hash;

static unsigned cl_env_hops_hash(cfs_hash_t *lh,
                                 const void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
        return cfs_hash_u64_hash((__u64)key, mask);
#else
        return cfs_hash_u32_hash((__u32)key, mask);
#endif
}

static void *cl_env_hops_obj(struct hlist_node *hn)
{
        struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

        LASSERT(cle->ce_magic == &cl_env_init0);
        return (void *)cle;
}

static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
        struct cl_env *cle = cl_env_hops_obj(hn);

        LASSERT(cle->ce_owner != NULL);
        return (key == cle->ce_owner);
}

static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
{
        struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

        LASSERT(cle->ce_magic == &cl_env_init0);
}

static cfs_hash_ops_t cl_env_hops = {
        .hs_hash        = cl_env_hops_hash,
        .hs_key         = cl_env_hops_obj,
        .hs_keycmp      = cl_env_hops_keycmp,
        .hs_object      = cl_env_hops_obj,
        .hs_get         = cl_env_hops_noop,
        .hs_put_locked  = cl_env_hops_noop,
};

static inline struct cl_env *cl_env_fetch(void)
{
        struct cl_env *cle;

        cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
        return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
        if (cle) {
                int rc;

                LASSERT(cle->ce_owner == NULL);
                cle->ce_owner = (void *)(long)current->pid;
                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
                                         &cle->ce_node);
                LASSERT(rc == 0);
        }
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
        void *cookie;

        LASSERT(cle->ce_owner == (void *)(long)current->pid);
        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
                              &cle->ce_node);
        LASSERT(cookie == cle);
        cle->ce_owner = NULL;
}

static int cl_env_store_init(void)
{
        cl_env_hash = cfs_hash_create("cl_env",
                                      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
                                      HASH_CL_ENV_BKT_BITS, 0,
                                      CFS_HASH_MIN_THETA,
                                      CFS_HASH_MAX_THETA,
                                      &cl_env_hops,
                                      CFS_HASH_RW_BKTLOCK);
        return cl_env_hash != NULL ? 0 : -ENOMEM;
}

static void cl_env_store_fini(void)
{
        cfs_hash_putref(cl_env_hash);
}

#else /* LL_TASK_CL_ENV */
/*
 * An implementation that stores cl_env directly in the thread structure.
 */
static inline struct cl_env *cl_env_fetch(void)
{
        struct cl_env *cle;

        cle = current->LL_TASK_CL_ENV;
        if (cle && cle->ce_magic != &cl_env_init0)
                cle = NULL;
        return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
        if (cle) {
                LASSERT(cle->ce_owner == NULL);
                cle->ce_owner = current;
                cle->ce_prev = current->LL_TASK_CL_ENV;
                current->LL_TASK_CL_ENV = cle;
        }
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
        LASSERT(cle->ce_owner == current);
        LASSERT(current->LL_TASK_CL_ENV == cle);
        current->LL_TASK_CL_ENV = cle->ce_prev;
        cle->ce_owner = NULL;
}

static int cl_env_store_init(void) { return 0; }
static void cl_env_store_fini(void) { }

#endif /* LL_TASK_CL_ENV */

static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
        if (cle == NULL)
                cle = cl_env_fetch();

        if (cle && cle->ce_owner)
                cl_env_do_detach(cle);

        return cle;
}

static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
        struct lu_env *env;
        struct cl_env *cle;

        OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
        if (cle != NULL) {
                int rc;

                INIT_LIST_HEAD(&cle->ce_linkage);
                cle->ce_magic = &cl_env_init0;
                env = &cle->ce_lu;
                rc = lu_env_init(env, LCT_CL_THREAD | ctx_tags);
                if (rc == 0) {
                        rc = lu_context_init(&cle->ce_ses,
                                             LCT_SESSION | ses_tags);
                        if (rc == 0) {
                                lu_context_enter(&cle->ce_ses);
                                env->le_ses = &cle->ce_ses;
                                cl_env_init0(cle, debug);
                        } else {
                                lu_env_fini(env);
                        }
                }
                if (rc != 0) {
                        OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
                        env = ERR_PTR(rc);
                } else {
                        CL_ENV_INC(create);
                        CL_ENV_INC(total);
                }
        } else {
                env = ERR_PTR(-ENOMEM);
        }
        return env;
}

static void cl_env_fini(struct cl_env *cle)
{
        CL_ENV_DEC(total);
        lu_context_fini(&cle->ce_lu.le_ctx);
        lu_context_fini(&cle->ce_ses);
        OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
}

static struct lu_env *cl_env_obtain(void *debug)
{
        struct cl_env *cle;
        struct lu_env *env;

        spin_lock(&cl_envs_guard);
        LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
        if (cl_envs_cached_nr > 0) {
                int rc;

                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
                list_del_init(&cle->ce_linkage);
                cl_envs_cached_nr--;
                spin_unlock(&cl_envs_guard);

                env = &cle->ce_lu;
                rc = lu_env_refill(env);
                if (rc == 0) {
                        cl_env_init0(cle, debug);
                        lu_context_enter(&env->le_ctx);
                        lu_context_enter(&cle->ce_ses);
                } else {
                        cl_env_fini(cle);
                        env = ERR_PTR(rc);
                }
        } else {
                spin_unlock(&cl_envs_guard);
                env = cl_env_new(lu_context_tags_default,
                                 lu_session_tags_default, debug);
        }
        return env;
}

static inline struct cl_env *cl_env_container(struct lu_env *env)
{
        return container_of(env, struct cl_env, ce_lu);
}

struct lu_env *cl_env_peek(int *refcheck)
{
        struct lu_env *env;
        struct cl_env *cle;

        CL_ENV_INC(lookup);

        /* check that we don't go far from untrusted pointer */
        CLASSERT(offsetof(struct cl_env, ce_magic) == 0);

        env = NULL;
        cle = cl_env_fetch();
        if (cle != NULL) {
                CL_ENV_INC(hit);
                env = &cle->ce_lu;
                *refcheck = ++cle->ce_ref;
        }
        CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
        return env;
}

/**
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned; otherwise, a new environment is allocated.
 *
 * Allocations are amortized through the global cache of environments.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope, and a pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 *
 * \see cl_env_put()
 */
struct lu_env *cl_env_get(int *refcheck)
{
        struct lu_env *env;

        env = cl_env_peek(refcheck);
        if (env == NULL) {
                env = cl_env_obtain(__builtin_return_address(0));
                if (!IS_ERR(env)) {
                        struct cl_env *cle;

                        cle = cl_env_container(env);
                        cl_env_attach(cle);
                        *refcheck = cle->ce_ref;
                        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
                }
        }
        return env;
}
EXPORT_SYMBOL(cl_env_get);

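/*
 * A minimal usage sketch for cl_env_get()/cl_env_put() (the surrounding
 * function is hypothetical): the same refcheck integer is passed to both
 * calls, so a missed cl_env_put() trips the reference-count LASSERT there:
 *
 *      int refcheck;
 *      struct lu_env *env;
 *
 *      env = cl_env_get(&refcheck);
 *      if (IS_ERR(env))
 *              return PTR_ERR(env);
 *      // ... use env ...
 *      cl_env_put(env, &refcheck);
 */
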
/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
        struct lu_env *env;

        LASSERT(cl_env_peek(refcheck) == NULL);
        env = cl_env_new(tags, tags, __builtin_return_address(0));
        if (!IS_ERR(env)) {
                struct cl_env *cle;

                cle = cl_env_container(env);
                *refcheck = cle->ce_ref;
                CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
        }
        return env;
}
EXPORT_SYMBOL(cl_env_alloc);

static void cl_env_exit(struct cl_env *cle)
{
        LASSERT(cle->ce_owner == NULL);
        lu_context_exit(&cle->ce_lu.le_ctx);
        lu_context_exit(&cle->ce_ses);
}

/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned cl_env_cache_purge(unsigned nr)
{
        struct cl_env *cle;

        spin_lock(&cl_envs_guard);
        for (; !list_empty(&cl_envs) && nr > 0; --nr) {
                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
                list_del_init(&cle->ce_linkage);
                LASSERT(cl_envs_cached_nr > 0);
                cl_envs_cached_nr--;
                spin_unlock(&cl_envs_guard);

                cl_env_fini(cle);
                spin_lock(&cl_envs_guard);
        }
        LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
        spin_unlock(&cl_envs_guard);
        return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);

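/*
 * A sketch of the intended use (the module-teardown context is assumed, not
 * shown in this file): passing ~0 asks the function to drain the entire
 * cache, e.g. before the kmem caches backing the environments are destroyed:
 *
 *      cl_env_cache_purge(~0);
 */
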
/**
 * Release an environment.
 *
 * Decrement the \a env reference counter. When the counter drops to 0,
 * nothing in this thread is using the environment and it is returned to
 * the allocation cache, or freed immediately if the cache is already full.
 */
void cl_env_put(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle;

        cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 0);
        LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));

        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
        if (--cle->ce_ref == 0) {
                CL_ENV_DEC(busy);
                cl_env_detach(cle);
                cle->ce_debug = NULL;
                cl_env_exit(cle);
                /*
                 * Don't bother to take a lock here.
                 *
                 * Return environment to the cache only when it was allocated
                 * with the standard tags.
                 */
                if (cl_envs_cached_nr < cl_envs_cached_max &&
                    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
                    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
                        spin_lock(&cl_envs_guard);
                        list_add(&cle->ce_linkage, &cl_envs);
                        cl_envs_cached_nr++;
                        spin_unlock(&cl_envs_guard);
                } else {
                        cl_env_fini(cle);
                }
        }
}
EXPORT_SYMBOL(cl_env_put);

/**
 * Declares a point of re-entrancy.
 *
 * \see cl_env_reexit()
 */
void *cl_env_reenter(void)
{
        return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);

/**
 * Exits re-entrancy.
 */
void cl_env_reexit(void *cookie)
{
        cl_env_detach(NULL);
        cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);

/**
 * Sets up a user-supplied \a env as the current environment. This is to be
 * used to guarantee that an environment exists even when cl_env_get() fails.
 * It is up to the user to ensure proper concurrency control.
 *
 * \see cl_env_unplant()
 */
void cl_env_implant(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 0);

        cl_env_attach(cle);
        cl_env_get(refcheck);
        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);

/**
 * Detach environment installed earlier by cl_env_implant().
 */
void cl_env_unplant(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 1);

        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);

        cl_env_detach(cle);
        cl_env_put(env, refcheck);
}
EXPORT_SYMBOL(cl_env_unplant);

struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
{
        struct lu_env *env;

        nest->cen_cookie = NULL;
        env = cl_env_peek(&nest->cen_refcheck);
        if (env != NULL) {
                if (!cl_io_is_going(env))
                        return env;
                cl_env_put(env, &nest->cen_refcheck);
                nest->cen_cookie = cl_env_reenter();
        }
        env = cl_env_get(&nest->cen_refcheck);
        if (IS_ERR(env)) {
                cl_env_reexit(nest->cen_cookie);
                return env;
        }

        LASSERT(!cl_io_is_going(env));
        return env;
}
EXPORT_SYMBOL(cl_env_nested_get);

void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
{
        cl_env_put(env, &nest->cen_refcheck);
        cl_env_reexit(nest->cen_cookie);
}
EXPORT_SYMBOL(cl_env_nested_put);

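/*
 * A minimal sketch of how the nesting helpers pair up (the re-entrant
 * callback around them is hypothetical): code that may run while another
 * cl_io is in progress obtains a fresh environment and restores the outer
 * one on the way out:
 *
 *      struct cl_env_nest nest;
 *      struct lu_env *env;
 *
 *      env = cl_env_nested_get(&nest);
 *      if (IS_ERR(env))
 *              return PTR_ERR(env);
 *      // ... use env; cl_io_is_going(env) is guaranteed false ...
 *      cl_env_nested_put(&nest, env);
 */
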
/**
 * Converts struct cl_attr to struct ost_lvb.
 *
 * \see cl_lvb2attr
 */
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
        lvb->lvb_size   = attr->cat_size;
        lvb->lvb_mtime  = attr->cat_mtime;
        lvb->lvb_atime  = attr->cat_atime;
        lvb->lvb_ctime  = attr->cat_ctime;
        lvb->lvb_blocks = attr->cat_blocks;
}
EXPORT_SYMBOL(cl_attr2lvb);

/**
 * Converts struct ost_lvb to struct cl_attr.
 *
 * \see cl_attr2lvb
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
        attr->cat_size   = lvb->lvb_size;
        attr->cat_mtime  = lvb->lvb_mtime;
        attr->cat_atime  = lvb->lvb_atime;
        attr->cat_ctime  = lvb->lvb_ctime;
        attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);

static struct cl_env cl_env_percpu[NR_CPUS];

static int cl_env_percpu_init(void)
{
        struct cl_env *cle;
        int tags = LCT_REMEMBER | LCT_NOREF;
        int i, j;
        int rc = 0;

        for_each_possible_cpu(i) {
                struct lu_env *env;

                cle = &cl_env_percpu[i];
                env = &cle->ce_lu;

                INIT_LIST_HEAD(&cle->ce_linkage);
                cle->ce_magic = &cl_env_init0;
                rc = lu_env_init(env, LCT_CL_THREAD | tags);
                if (rc == 0) {
                        rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
                        if (rc == 0) {
                                lu_context_enter(&cle->ce_ses);
                                env->le_ses = &cle->ce_ses;
                        } else {
                                lu_env_fini(env);
                        }
                }
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                /* Indices 0 to i (excluding i) were correctly initialized,
                 * thus we must uninitialize up to i, the rest are undefined. */
                for (j = 0; j < i; j++) {
                        cle = &cl_env_percpu[j];
                        lu_context_exit(&cle->ce_ses);
                        lu_context_fini(&cle->ce_ses);
                        lu_env_fini(&cle->ce_lu);
                }
        }
        return rc;
}

static void cl_env_percpu_fini(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct cl_env *cle = &cl_env_percpu[i];

                lu_context_exit(&cle->ce_ses);
                lu_context_fini(&cle->ce_ses);
                lu_env_fini(&cle->ce_lu);
        }
}

static void cl_env_percpu_refill(void)
{
        int i;

        for_each_possible_cpu(i)
                lu_env_refill(&cl_env_percpu[i].ce_lu);
}

void cl_env_percpu_put(struct lu_env *env)
{
        struct cl_env *cle;
        int cpu;

        cpu = smp_processor_id();
        cle = cl_env_container(env);
        LASSERT(cle == &cl_env_percpu[cpu]);

        cle->ce_ref--;
        LASSERT(cle->ce_ref == 0);

        CL_ENV_DEC(busy);
        cl_env_detach(cle);
        cle->ce_debug = NULL;

        put_cpu();
}
EXPORT_SYMBOL(cl_env_percpu_put);

struct lu_env *cl_env_percpu_get(void)
{
        struct cl_env *cle;

        cle = &cl_env_percpu[get_cpu()];
        cl_env_init0(cle, __builtin_return_address(0));

        cl_env_attach(cle);
        return &cle->ce_lu;
}
EXPORT_SYMBOL(cl_env_percpu_get);

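/*
 * A minimal sketch of the per-CPU pairing (the caller is hypothetical):
 * cl_env_percpu_get() disables preemption via get_cpu(), and
 * cl_env_percpu_put() re-enables it via put_cpu(), so the environment must
 * be released on the same CPU and must not be held across sleeping calls:
 *
 *      struct lu_env *env = cl_env_percpu_get();
 *
 *      // ... short, non-sleeping use of env ...
 *      cl_env_percpu_put(env);
 */
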
/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
                                struct lu_device_type *ldt,
                                struct lu_device *next)
{
        const char       *typename;
        struct lu_device *d;

        LASSERT(ldt != NULL);

        typename = ldt->ldt_name;
        d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
        if (!IS_ERR(d)) {
                int rc;

                if (site != NULL)
                        d->ld_site = site;
                rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
                if (rc == 0) {
                        lu_device_get(d);
                        lu_ref_add(&d->ld_reference,
                                   "lu-stack", &lu_site_init);
                } else {
                        ldt->ldt_ops->ldto_device_free(env, d);
                        CERROR("can't init device '%s', %d\n", typename, rc);
                        d = ERR_PTR(rc);
                }
        } else {
                CERROR("Cannot allocate device: '%s'\n", typename);
        }
        return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);

/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
        lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);

static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
        return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl0_key_{init,fini}() */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);

static void *cl_key_init(const struct lu_context *ctx,
                         struct lu_context_key *key)
{
        struct cl_thread_info *info;

        info = cl0_key_init(ctx, key);
        if (!IS_ERR(info)) {
                size_t i;

                for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                        lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
        }
        return info;
}

static void cl_key_fini(const struct lu_context *ctx,
                        struct lu_context_key *key, void *data)
{
        struct cl_thread_info *info;
        size_t i;

        info = data;
        for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
        cl0_key_fini(ctx, key, data);
}

static void cl_key_exit(const struct lu_context *ctx,
                        struct lu_context_key *key, void *data)
{
        struct cl_thread_info *info = data;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
                LASSERT(info->clt_counters[i].ctc_nr_held == 0);
                LASSERT(info->clt_counters[i].ctc_nr_used == 0);
                LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
                LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
                lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
        }
}

static struct lu_context_key cl_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = cl_key_init,
        .lct_fini = cl_key_fini,
        .lct_exit = cl_key_exit
};

static struct lu_kmem_descr cl_object_caches[] = {
        {
                .ckd_cache = &cl_env_kmem,
                .ckd_name  = "cl_env_kmem",
                .ckd_size  = sizeof(struct cl_env)
        },
        {
                .ckd_cache = NULL
        }
};

/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
        int result;

        INIT_LIST_HEAD(&cl_envs);

        result = cl_env_store_init();
        if (result)
                return result;

        result = lu_kmem_init(cl_object_caches);
        if (result)
                goto out_store;

        LU_CONTEXT_KEY_INIT(&cl_key);
        result = lu_context_key_register(&cl_key);
        if (result)
                goto out_kmem;

        result = cl_env_percpu_init();
        if (result) /* no cl_env_percpu_fini on error */
                goto out_key;

        return 0;

out_key:
        lu_context_key_degister(&cl_key);
out_kmem:
        lu_kmem_fini(cl_object_caches);
out_store:
        cl_env_store_fini();
        return result;
}

/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
        cl_env_percpu_fini();
        lu_context_key_degister(&cl_key);
        lu_kmem_fini(cl_object_caches);
        cl_env_store_fini();
}