/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/libcfs_hash.h>	/* for cfs_hash stuff */
#include <cl_object.h>
#include <lu_object.h>
#include "cl_internal.h"

static struct kmem_cache *cl_env_kmem;

/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;

/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
	int result;

	result = lu_object_header_init(&h->coh_lu);
	if (result == 0) {
		spin_lock_init(&h->coh_attr_guard);
		lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
		h->coh_page_bufsize = 0;
	}
	return result;
}
EXPORT_SYMBOL(cl_object_header_init);

/**
 * Finalize cl_object_header.
 */
void cl_object_header_fini(struct cl_object_header *h)
{
	lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);

/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either cached or newly created object. Additional reference on the
 * returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
				 struct cl_device *cd, const struct lu_fid *fid,
				 const struct cl_object_conf *c)
{
	return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
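
/*
 * Illustrative sketch, not part of the original file: the usual lifecycle
 * pairs cl_object_find() with cl_object_put(); "cd", "fid" and "conf" stand
 * for whatever device, fid and configuration the caller already has in hand.
 *
 *	struct cl_object *obj;
 *
 *	obj = cl_object_find(env, cd, fid, conf);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	cl_object_put(env, obj);
 */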

/**
 * Releases a reference on \a o.
 *
 * When the last reference is released, the object is returned to the cache,
 * unless the lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in
 * its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
	lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);

/**
 * Acquire an additional reference to the object \a o.
 *
 * This can only be used to acquire an _additional_ reference, i.e., the
 * caller already has to possess at least one reference to \a o before
 * calling this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
	lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);

/**
 * Returns the top-object for a given \a o.
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
	struct cl_object_header *hdr = cl_object_header(o);
	struct cl_object *top;

	while (hdr->coh_parent != NULL)
		hdr = hdr->coh_parent;

	top = lu2cl(lu_object_top(&hdr->coh_lu));
	CDEBUG(D_TRACE, "%p -> %p\n", o, top);
	return top;
}
EXPORT_SYMBOL(cl_object_top);

/**
 * Returns pointer to the lock protecting data-attributes for the given
 * object.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
	return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}

/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing, until lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get(), cl_object_attr_update().
 */
void cl_object_attr_lock(struct cl_object *o)
	__acquires(cl_object_attr_guard(o))
{
	spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);

/**
 * Releases data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
	__releases(cl_object_attr_guard(o))
{
	spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
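
/*
 * Illustrative sketch, not part of the original file: per the locking
 * protocol above, attribute reads take the guard around coo_attr_get()
 * calls; "attr" is any caller-provided cl_attr buffer.
 *
 *	cl_object_attr_lock(obj);
 *	rc = cl_object_attr_get(env, obj, attr);
 *	cl_object_attr_unlock(obj);
 */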

/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in parts of \a attr that this layer is responsible
 * for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
		       struct cl_attr *attr)
{
	struct lu_object_header *top;
	int result = 0;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_get != NULL) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_get);

/**
 * Updates data-attributes of an object \a obj.
 *
 * Only attributes mentioned in the validness bit-mask \a v are updated.
 * Calls cl_object_operations::coo_attr_update() on every layer, bottom to
 * top.
 */
int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
			  const struct cl_attr *attr, unsigned v)
{
	struct lu_object_header *top;
	int result = 0;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_update != NULL) {
			result = obj->co_ops->coo_attr_update(env, obj, attr,
							      v);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_update);
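
/*
 * Illustrative sketch, not part of the original file: an update propagates
 * only the attributes named in the validness mask \a v; CAT_SIZE and
 * CAT_MTIME are assumed here to be the usual cl_attr validity bits.
 *
 *	cl_object_attr_lock(obj);
 *	attr->cat_size = new_size;
 *	attr->cat_mtime = now;
 *	rc = cl_object_attr_update(env, obj, attr, CAT_SIZE | CAT_MTIME);
 *	cl_object_attr_unlock(obj);
 */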

/**
 * Notifies layers (bottom-to-top) that a glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to the glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
		      struct ost_lvb *lvb)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_glimpse != NULL) {
			result = obj->co_ops->coo_glimpse(env, obj, lvb);
			if (result != 0)
				break;
		}
	}
	LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
			 "size: %llu mtime: %llu atime: %llu "
			 "ctime: %llu blocks: %llu\n",
			 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
			 lvb->lvb_ctime, lvb->lvb_blocks);
	return result;
}
EXPORT_SYMBOL(cl_object_glimpse);

/**
 * Updates a configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
		const struct cl_object_conf *conf)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_conf_set != NULL) {
			result = obj->co_ops->coo_conf_set(env, obj, conf);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_conf_set);

/**
 * Prunes caches of pages and locks for this object.
 */
int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *top;
	struct cl_object *o;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
		if (o->co_ops->coo_prune != NULL) {
			result = o->co_ops->coo_prune(env, o);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_prune);

/**
 * Get stripe information of this object.
 */
int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
			struct lov_user_md __user *uarg, size_t size)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_getstripe != NULL) {
			result = obj->co_ops->coo_getstripe(env, obj, uarg,
							    size);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_getstripe);

/**
 * Get fiemap extents from file object.
 *
 * \param env	 [in]	lustre environment
 * \param obj	 [in]	file object
 * \param key	 [in]	fiemap request argument
 * \param fiemap [out]	fiemap extents mapping retrieved
 * \param buflen [in]	max buffer length of @fiemap
 *
 * \retval 0	success
 * \retval < 0	error
 */
int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
		     struct ll_fiemap_info_key *key,
		     struct fiemap *fiemap, size_t *buflen)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_fiemap != NULL) {
			result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
							 buflen);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_fiemap);

int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
			 struct cl_layout *cl)
{
	struct lu_object_header *top = obj->co_lu.lo_header;

	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_layout_get != NULL)
			return obj->co_ops->coo_layout_get(env, obj, cl);
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(cl_object_layout_get);

loff_t cl_object_maxbytes(struct cl_object *obj)
{
	struct lu_object_header *top = obj->co_lu.lo_header;
	loff_t maxbytes = LLONG_MAX;

	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_maxbytes != NULL)
			maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
					 maxbytes);
	}

	return maxbytes;
}
EXPORT_SYMBOL(cl_object_maxbytes);

int cl_object_flush(const struct lu_env *env, struct cl_object *obj,
		    struct ldlm_lock *lock)
{
	struct lu_object_header *top = obj->co_lu.lo_header;
	int rc = 0;

	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_object_flush) {
			rc = obj->co_ops->coo_object_flush(env, obj, lock);
			if (rc)
				break;
		}
	}
	return rc;
}
EXPORT_SYMBOL(cl_object_flush);

/**
 * Helper function removing all object locks, and marking object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
	struct cl_object_header *hdr = cl_object_header(obj);

	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
}
EXPORT_SYMBOL(cl_object_kill);

void cache_stats_init(struct cache_stats *cs, const char *name)
{
	int i;

	cs->cs_name = name;
	for (i = 0; i < CS_NR; i++)
		atomic_set(&cs->cs_stats[i], 0);
}

static int cache_stats_print(const struct cache_stats *cs,
			     struct seq_file *m, int h)
{
	int i;

	/*
	 *   lookup    hit  total   cached create
	 * env: ...... ...... ...... ...... ......
	 */
	if (h) {
		const char *names[CS_NR] = CS_NAMES;

		seq_printf(m, "%6s", " ");
		for (i = 0; i < CS_NR; i++)
			seq_printf(m, "%8s", names[i]);
		seq_printf(m, "\n");
	}

	seq_printf(m, "%5.5s:", cs->cs_name);
	for (i = 0; i < CS_NR; i++)
		seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
	return 0;
}

static void cl_env_percpu_refill(void);

/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
	size_t i;
	int result;

	result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
	if (result == 0) {
		cache_stats_init(&s->cs_pages, "pages");
		for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
			atomic_set(&s->cs_pages_state[i], 0);
		cl_env_percpu_refill();
	}
	return result;
}
EXPORT_SYMBOL(cl_site_init);

/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
	lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);

static struct cache_stats cl_env_stats = {
	.cs_name  = "envs",
	.cs_stats = { ATOMIC_INIT(0), }
};

/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
	static const char *pstate[] = {
		[CPS_CACHED]	= "c",
		[CPS_OWNED]	= "o",
		[CPS_PAGEOUT]	= "w",
		[CPS_PAGEIN]	= "r",
		[CPS_FREEING]	= "f"
	};
	size_t i;

	/*
	 *   lookup    hit  total   busy create
	 * pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
	 * locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
	 *   env: ...... ...... ...... ...... ......
	 */
	lu_site_stats_seq_print(&site->cs_lu, m);
	cache_stats_print(&site->cs_pages, m, 1);
	seq_printf(m, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
		seq_printf(m, "%s: %u ", pstate[i],
			   atomic_read(&site->cs_pages_state[i]));
	seq_printf(m, "]\n");
	cache_stats_print(&cl_env_stats, m, 0);
	seq_printf(m, "\n");
	return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);

/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store cl_env pointer in task specific
 * structures. On Linux, it isn't easy to use task_struct->journal_info
 * because Lustre code may call into other fs during memory reclaim, which
 * has certain assumptions about journal_info. There are not currently any
 * fields in task_struct that can be used for this purpose.
 *
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into the other part of the kernel
 * which will use those fields in task_struct without explicitly exiting
 * Lustre.
 *
 * Since no space is available in task_struct, a hash will be used.
 */

static unsigned cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
					  * on the number of cached envs
					  */

static struct cl_env_cache {
	rwlock_t		cec_guard;
	unsigned		cec_count;
	struct list_head	cec_envs;
} *cl_envs = NULL;

struct cl_env {
	void			*ce_magic;
	struct lu_env		 ce_lu;
	struct lu_context	 ce_ses;

	/*
	 * Linkage into global list of all client environments. Used for
	 * garbage collection.
	 */
	struct list_head	 ce_linkage;
	int			 ce_ref;
	/*
	 * Debugging field: address of the caller who made original
	 * allocation.
	 */
	void			*ce_debug;
};

static void cl_env_inc(enum cache_stats_item item)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
	atomic_inc(&cl_env_stats.cs_stats[item]);
#endif
}

static void cl_env_dec(enum cache_stats_item item)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
	LASSERT(atomic_read(&cl_env_stats.cs_stats[item]) > 0);
	atomic_dec(&cl_env_stats.cs_stats[item]);
#endif
}

static void cl_env_init0(struct cl_env *cle, void *debug)
{
	LASSERT(cle->ce_ref == 0);
	LASSERT(cle->ce_magic == &cl_env_init0);
	LASSERT(cle->ce_debug == NULL);

	cle->ce_ref = 1;
	cle->ce_debug = debug;
	cl_env_inc(CS_busy);
}

static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
	struct lu_env *env;
	struct cl_env *cle;

	OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
	if (cle != NULL) {
		int rc;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		env = &cle->ce_lu;
		rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses,
					     LCT_SESSION | ses_tags);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
				cl_env_init0(cle, debug);
			} else {
				lu_env_fini(env);
			}
		}
		if (rc != 0) {
			OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
			env = ERR_PTR(rc);
		} else {
			cl_env_inc(CS_create);
			cl_env_inc(CS_total);
		}
	} else {
		env = ERR_PTR(-ENOMEM);
	}
	return env;
}

static void cl_env_fini(struct cl_env *cle)
{
	cl_env_dec(CS_total);
	lu_context_fini(&cle->ce_lu.le_ctx);
	lu_context_fini(&cle->ce_ses);
	OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
}

static struct lu_env *cl_env_obtain(void *debug)
{
	struct cl_env *cle;
	struct lu_env *env;
	int cpu = get_cpu();

	read_lock(&cl_envs[cpu].cec_guard);
	LASSERT(equi(cl_envs[cpu].cec_count == 0,
		     list_empty(&cl_envs[cpu].cec_envs)));
	if (cl_envs[cpu].cec_count > 0) {
		int rc;

		cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
				   ce_linkage);
		list_del_init(&cle->ce_linkage);
		cl_envs[cpu].cec_count--;
		read_unlock(&cl_envs[cpu].cec_guard);
		put_cpu();

		env = &cle->ce_lu;
		rc = lu_env_refill(env);
		if (rc == 0) {
			cl_env_init0(cle, debug);
			lu_context_enter(&env->le_ctx);
			lu_context_enter(&cle->ce_ses);
		} else {
			cl_env_fini(cle);
			env = ERR_PTR(rc);
		}
	} else {
		read_unlock(&cl_envs[cpu].cec_guard);
		put_cpu();
		env = cl_env_new(lu_context_tags_default,
				 lu_session_tags_default, debug);
	}
	return env;
}

static inline struct cl_env *cl_env_container(struct lu_env *env)
{
	return container_of(env, struct cl_env, ce_lu);
}

/**
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned, otherwise a new environment is allocated.
 *
 * Allocations are amortized through the global cache of environments.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 */
struct lu_env *cl_env_get(__u16 *refcheck)
{
	struct lu_env *env;

	env = cl_env_obtain(__builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_get);
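
/*
 * Illustrative sketch, not part of the original file: the refcheck cookie
 * returned by cl_env_get() is handed back to cl_env_put() in the same scope,
 * which lets the LASSERT in cl_env_put() catch missed or unbalanced puts.
 *
 *	__u16 refcheck;
 *	struct lu_env *env;
 *
 *	env = cl_env_get(&refcheck);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	...
 *	cl_env_put(env, &refcheck);
 */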

/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags)
{
	struct lu_env *env;

	env = cl_env_new(tags, tags, __builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_alloc);

static void cl_env_exit(struct cl_env *cle)
{
	lu_context_exit(&cle->ce_lu.le_ctx);
	lu_context_exit(&cle->ce_ses);
}

/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned cl_env_cache_purge(unsigned nr)
{
	struct cl_env *cle;
	unsigned i;

	for_each_possible_cpu(i) {
		write_lock(&cl_envs[i].cec_guard);
		for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
			cle = container_of(cl_envs[i].cec_envs.next,
					   struct cl_env, ce_linkage);
			list_del_init(&cle->ce_linkage);
			LASSERT(cl_envs[i].cec_count > 0);
			cl_envs[i].cec_count--;
			write_unlock(&cl_envs[i].cec_guard);

			cl_env_fini(cle);
			write_lock(&cl_envs[i].cec_guard);
		}
		LASSERT(equi(cl_envs[i].cec_count == 0,
			     list_empty(&cl_envs[i].cec_envs)));
		write_unlock(&cl_envs[i].cec_guard);
	}
	return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);
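
/*
 * Illustrative note, not part of the original file: a caller that wants to
 * drop every cached environment (e.g. on cleanup) can pass a saturating
 * count, cl_env_cache_purge(~0); the returned value is the number of
 * requested frees that could not be satisfied because the caches ran empty.
 */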

/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When the counter drops to 0, nothing
 * in this thread is using the environment and it is returned to the
 * allocation cache, or freed straight away if the cache already holds
 * cl_envs_cached_max environments.
 */
void cl_env_put(struct lu_env *env, __u16 *refcheck)
{
	struct cl_env *cle;

	cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);
	LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	if (--cle->ce_ref == 0) {
		int cpu = get_cpu();

		cl_env_dec(CS_busy);
		cle->ce_debug = NULL;
		cl_env_exit(cle);
		/*
		 * Don't bother to take a lock here.
		 *
		 * Return environment to the cache only when it was allocated
		 * with the standard tags.
		 */
		if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
		    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == lu_context_tags_default &&
		    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == lu_session_tags_default) {
			read_lock(&cl_envs[cpu].cec_guard);
			list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
			cl_envs[cpu].cec_count++;
			read_unlock(&cl_envs[cpu].cec_guard);
		} else {
			cl_env_fini(cle);
		}
		put_cpu();
	}
}
EXPORT_SYMBOL(cl_env_put);

/**
 * Converts struct cl_attr to struct ost_lvb.
 */
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
	lvb->lvb_size   = attr->cat_size;
	lvb->lvb_mtime  = attr->cat_mtime;
	lvb->lvb_atime  = attr->cat_atime;
	lvb->lvb_ctime  = attr->cat_ctime;
	lvb->lvb_blocks = attr->cat_blocks;
}

/**
 * Converts struct ost_lvb to struct cl_attr.
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
	attr->cat_size   = lvb->lvb_size;
	attr->cat_mtime  = lvb->lvb_mtime;
	attr->cat_atime  = lvb->lvb_atime;
	attr->cat_ctime  = lvb->lvb_ctime;
	attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);

static struct cl_env cl_env_percpu[NR_CPUS];

static int cl_env_percpu_init(void)
{
	struct cl_env *cle;
	int tags = LCT_REMEMBER | LCT_NOREF;
	int i, j;
	int rc = 0;

	for_each_possible_cpu(i) {
		struct lu_env *env;

		rwlock_init(&cl_envs[i].cec_guard);
		INIT_LIST_HEAD(&cl_envs[i].cec_envs);
		cl_envs[i].cec_count = 0;

		cle = &cl_env_percpu[i];
		env = &cle->ce_lu;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		rc = lu_env_init(env, LCT_CL_THREAD | tags);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
			} else {
				lu_env_fini(env);
			}
		}
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* Indices 0 to i (excluding i) were correctly initialized,
		 * thus we must uninitialize up to i, the rest are undefined. */
		for (j = 0; j < i; j++) {
			cle = &cl_env_percpu[j];
			lu_context_exit(&cle->ce_ses);
			lu_context_fini(&cle->ce_ses);
			lu_env_fini(&cle->ce_lu);
		}
	}
	return rc;
}

static void cl_env_percpu_fini(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cl_env *cle = &cl_env_percpu[i];

		lu_context_exit(&cle->ce_ses);
		lu_context_fini(&cle->ce_ses);
		lu_env_fini(&cle->ce_lu);
	}
}

static void cl_env_percpu_refill(void)
{
	int i;

	for_each_possible_cpu(i)
		lu_env_refill(&cl_env_percpu[i].ce_lu);
}

void cl_env_percpu_put(struct lu_env *env)
{
	struct cl_env *cle;
	int cpu;

	cpu = smp_processor_id();
	cle = cl_env_container(env);
	LASSERT(cle == &cl_env_percpu[cpu]);

	cle->ce_ref--;
	LASSERT(cle->ce_ref == 0);

	cl_env_dec(CS_busy);
	cle->ce_debug = NULL;

	put_cpu();
}
EXPORT_SYMBOL(cl_env_percpu_put);

struct lu_env *cl_env_percpu_get(void)
{
	struct cl_env *cle;

	cle = &cl_env_percpu[get_cpu()];
	cl_env_init0(cle, __builtin_return_address(0));

	return &cle->ce_lu;
}
EXPORT_SYMBOL(cl_env_percpu_get);
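
/*
 * Illustrative note, not part of the original file: cl_env_percpu_get()
 * calls get_cpu(), so preemption stays disabled until the matching
 * cl_env_percpu_put() calls put_cpu(); the pair should therefore bracket
 * only short, non-sleeping sections.
 *
 *	env = cl_env_percpu_get();
 *	... non-blocking work ...
 *	cl_env_percpu_put(env);
 */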

/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
				struct lu_device_type *ldt,
				struct lu_device *next)
{
	const char       *typename;
	struct lu_device *d;

	LASSERT(ldt != NULL);

	typename = ldt->ldt_name;
	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
	if (!IS_ERR(d)) {
		int rc;

		if (site != NULL)
			d->ld_site = site;

		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
		if (rc == 0) {
			lu_device_get(d);
			lu_ref_add(&d->ld_reference,
				   "lu-stack", &lu_site_init);
		} else {
			ldt->ldt_ops->ldto_device_free(env, d);
			CERROR("can't init device '%s', %d\n", typename, rc);
			d = ERR_PTR(rc);
		}
	} else {
		CERROR("Cannot allocate device: '%s'\n", typename);
	}
	return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
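
/*
 * Illustrative note, not part of the original file: the setup order above is
 * allocate (ldto_device_alloc), then wire the device into the stack against
 * \a next (ldto_device_init); on init failure the device is freed again, so
 * the caller only ever sees a fully initialized device or an error pointer.
 */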

/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
	lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);

static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
	return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl_key_{init,fini}() */
LU_KEY_INIT_FINI(cl, struct cl_thread_info);

static struct lu_context_key cl_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = cl_key_init,
	.lct_fini = cl_key_fini,
};

static struct lu_kmem_descr cl_object_caches[] = {
	{
		.ckd_cache = &cl_env_kmem,
		.ckd_name  = "cl_env_kmem",
		.ckd_size  = sizeof(struct cl_env)
	},
	{
		.ckd_cache = NULL
	}
};

/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
	int result;

	OBD_ALLOC(cl_envs, sizeof(*cl_envs) * num_possible_cpus());
	if (cl_envs == NULL)
		GOTO(out, result = -ENOMEM);

	result = lu_kmem_init(cl_object_caches);
	if (result)
		GOTO(out_envs, result);

	LU_CONTEXT_KEY_INIT(&cl_key);
	result = lu_context_key_register(&cl_key);
	if (result)
		GOTO(out_kmem, result);

	result = cl_env_percpu_init();
	if (result) /* no cl_env_percpu_fini on error */
		GOTO(out_keys, result);

	return 0;

out_keys:
	lu_context_key_degister(&cl_key);
out_kmem:
	lu_kmem_fini(cl_object_caches);
out_envs:
	OBD_FREE(cl_envs, sizeof(*cl_envs) * num_possible_cpus());
out:
	return result;
}

/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
	cl_env_percpu_fini();
	lu_context_key_degister(&cl_key);
	lu_kmem_fini(cl_object_caches);
	OBD_FREE(cl_envs, sizeof(*cl_envs) * num_possible_cpus());
}