// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Client Lustre Object.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <cl_object.h>
#include <lu_object.h>
#include "cl_internal.h"

static struct kmem_cache *cl_env_kmem;
struct kmem_cache *cl_dio_aio_kmem;
struct kmem_cache *cl_sub_dio_kmem;
struct kmem_cache *cl_page_kmem_array[16];
unsigned short cl_page_kmem_size_array[16];

/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;

/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
        result = lu_object_header_init(&h->coh_lu);
        spin_lock_init(&h->coh_attr_guard);
        lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
        h->coh_page_bufsize = 0;
EXPORT_SYMBOL(cl_object_header_init);

/**
 * Finalize cl_object_header.
 */
void cl_object_header_fini(struct cl_object_header *h)
        lu_object_header_fini(&h->coh_lu);
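
/*
 * Usage sketch (illustrative only, not part of the original file): a layer
 * that embeds a cl_object_header would initialize it while setting up its
 * object and finalize it on the teardown path. "example_object" and the
 * helpers below are hypothetical names used purely for illustration.
 */
struct example_object {
        struct cl_object_header eo_header;
        struct cl_object        eo_cl;
};

static int example_object_setup(struct example_object *eo)
{
        int rc;

        rc = cl_object_header_init(&eo->eo_header);
        if (rc != 0)
                return rc;
        /* ... layer-specific initialization would follow here ... */
        return 0;
}

static void example_object_teardown(struct example_object *eo)
{
        cl_object_header_fini(&eo->eo_header);
}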
/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either cached or newly created object. Additional reference on the
 * returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
                                 struct cl_device *cd, const struct lu_fid *fid,
                                 const struct cl_object_conf *c)
        return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
EXPORT_SYMBOL(cl_object_find);
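
/*
 * Usage sketch (illustrative only, helper name hypothetical): a typical
 * caller looks the object up by FID, checks for an error pointer, and drops
 * its reference with cl_object_put() when done.
 */
static int example_with_object(const struct lu_env *env, struct cl_device *cd,
                               const struct lu_fid *fid,
                               const struct cl_object_conf *conf)
{
        struct cl_object *obj;

        obj = cl_object_find(env, cd, fid, conf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
        /* ... use the object here ... */
        cl_object_put(env, obj);
        return 0;
}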
/**
 * Releases a reference on \a o.
 *
 * When the last reference is released, the object is returned to the cache,
 * unless the lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in
 * its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
        lu_object_put(env, &o->co_lu);
EXPORT_SYMBOL(cl_object_put);

/**
 * Acquires an additional reference to the object \a o.
 *
 * This can only be used to acquire an _additional_ reference, i.e., the
 * caller must already hold at least one reference to \a o before calling
 * this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
        lu_object_get(&o->co_lu);
EXPORT_SYMBOL(cl_object_get);
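
/*
 * Usage sketch (illustrative only, helper name hypothetical): cl_object_get()
 * may only add to a reference the caller already holds, e.g. before stashing
 * a second pointer to the object; the extra reference is later dropped with
 * cl_object_put() by whoever owns the stashed pointer.
 */
static void example_take_extra_ref(struct cl_object *obj,
                                   struct cl_object **slot)
{
        cl_object_get(obj);     /* caller already holds a reference to obj */
        *slot = obj;
        /* ... the owner of *slot eventually calls cl_object_put() ... */
}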
/**
 * Returns the top-object for a given \a o.
 */
struct cl_object *cl_object_top(struct cl_object *o)
        struct cl_object_header *hdr = cl_object_header(o);
        struct cl_object *top;

        while (hdr->coh_parent != NULL)
                hdr = hdr->coh_parent;

        top = lu2cl(lu_object_top(&hdr->coh_lu));
        CDEBUG(D_TRACE, "%p -> %p\n", o, top);
EXPORT_SYMBOL(cl_object_top);

/**
 * Returns pointer to the lock protecting data-attributes for the given
 * object.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
        return &cl_object_header(cl_object_top(o))->coh_attr_guard;

/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing, until the lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get(), cl_object_attr_update().
 */
void cl_object_attr_lock(struct cl_object *o)
        __acquires(cl_object_attr_guard(o))
        spin_lock(cl_object_attr_guard(o));
EXPORT_SYMBOL(cl_object_attr_lock);

/**
 * Releases the data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
        __releases(cl_object_attr_guard(o))
        spin_unlock(cl_object_attr_guard(o));
EXPORT_SYMBOL(cl_object_attr_unlock);
/**
 * Returns data-attributes of an object \a top.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in the parts of \a attr that this layer is
 * responsible for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *top,
                       struct cl_attr *attr)
        struct cl_object *obj;

        assert_spin_locked(cl_object_attr_guard(top));

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_attr_get != NULL) {
                        result = obj->co_ops->coo_attr_get(env, obj, attr);
EXPORT_SYMBOL(cl_object_attr_get);
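
/*
 * Usage sketch (illustrative only, helper name hypothetical): attribute reads
 * are bracketed by cl_object_attr_lock()/cl_object_attr_unlock() and issued
 * against the top object.
 */
static int example_read_size(const struct lu_env *env, struct cl_object *o,
                             loff_t *size)
{
        struct cl_attr attr = { 0 };
        struct cl_object *top = cl_object_top(o);
        int rc;

        cl_object_attr_lock(top);
        rc = cl_object_attr_get(env, top, &attr);
        cl_object_attr_unlock(top);
        if (rc == 0)
                *size = attr.cat_size;
        return rc;
}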
/**
 * Updates data-attributes of an object \a top.
 *
 * Only attributes mentioned in the validity bit-mask \a v are updated. Calls
 * cl_object_operations::coo_attr_update() on every layer, bottom to top.
 */
int cl_object_attr_update(const struct lu_env *env, struct cl_object *top,
                          const struct cl_attr *attr, enum cl_attr_valid v)
        struct cl_object *obj;

        assert_spin_locked(cl_object_attr_guard(top));

        cl_object_for_each_reverse(obj, top) {
                if (obj->co_ops->coo_attr_update != NULL) {
                        result = obj->co_ops->coo_attr_update(env, obj, attr,
EXPORT_SYMBOL(cl_object_attr_update);
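
/*
 * Usage sketch (illustrative only, helper name hypothetical): updates follow
 * the same locking pattern; only the attributes selected by the validity
 * mask are pushed down to the layers.
 */
static int example_update_attr(const struct lu_env *env, struct cl_object *o,
                               const struct cl_attr *attr,
                               enum cl_attr_valid valid)
{
        struct cl_object *top = cl_object_top(o);
        int rc;

        cl_object_attr_lock(top);
        rc = cl_object_attr_update(env, top, attr, valid);
        cl_object_attr_unlock(top);
        return rc;
}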
/**
 * Mark the inode as dirty when the inode has uncommitted (unstable) pages.
 * Thus, when the system is under memory pressure, it will trigger background
 * writeback to commit and unpin the pages.
 */
void cl_object_dirty_for_sync(const struct lu_env *env, struct cl_object *top)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_dirty_for_sync != NULL)
                        obj->co_ops->coo_dirty_for_sync(env, obj);
EXPORT_SYMBOL(cl_object_dirty_for_sync);

/**
 * Notifies layers (bottom-to-top) that glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *top,
        struct cl_object *obj;

        cl_object_for_each_reverse(obj, top) {
                if (obj->co_ops->coo_glimpse != NULL) {
                        result = obj->co_ops->coo_glimpse(env, obj, lvb);

        LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top->co_lu.lo_header),
                         "size: %llu mtime: %llu atime: %llu "
                         "ctime: %llu blocks: %llu\n",
                         lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
                         lvb->lvb_ctime, lvb->lvb_blocks);
EXPORT_SYMBOL(cl_object_glimpse);
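
/*
 * Usage sketch (illustrative only, helper name hypothetical): a caller can
 * collect glimpse results into an ost_lvb and convert them to cl_attr with
 * cl_lvb2attr(), which is defined later in this file.
 */
static int example_glimpse_attr(const struct lu_env *env,
                                struct cl_object *o, struct cl_attr *attr)
{
        struct ost_lvb lvb = { 0 };
        int rc;

        rc = cl_object_glimpse(env, cl_object_top(o), &lvb);
        if (rc == 0)
                cl_lvb2attr(attr, &lvb);
        return rc;
}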
/**
 * Updates a configuration of an object \a top.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *top,
                const struct cl_object_conf *conf)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_conf_set != NULL) {
                        result = obj->co_ops->coo_conf_set(env, obj, conf);
EXPORT_SYMBOL(cl_conf_set);

/**
 * Prunes caches of pages and locks for this object.
 */
int cl_object_prune(const struct lu_env *env, struct cl_object *top)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_prune != NULL) {
                        result = obj->co_ops->coo_prune(env, obj);
EXPORT_SYMBOL(cl_object_prune);

/**
 * Get stripe information of this object.
 */
int cl_object_getstripe(const struct lu_env *env, struct cl_object *top,
                        struct lov_user_md __user *uarg, size_t size)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_getstripe) {
                        result = obj->co_ops->coo_getstripe(env, obj, uarg,
EXPORT_SYMBOL(cl_object_getstripe);
/**
 * Get fiemap extents from a file object.
 *
 * \param env    [in]  lustre environment
 * \param top    [in]  file object
 * \param key    [in]  fiemap request argument
 * \param fiemap [out] fiemap extents mapping retrieved
 * \param buflen [in]  max buffer length of \a fiemap
 */
int cl_object_fiemap(const struct lu_env *env, struct cl_object *top,
                     struct ll_fiemap_info_key *key,
                     struct fiemap *fiemap, size_t *buflen)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_fiemap) {
                        result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
EXPORT_SYMBOL(cl_object_fiemap);

int cl_object_layout_get(const struct lu_env *env, struct cl_object *top,
                         struct cl_layout *cl)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_layout_get)
                        return obj->co_ops->coo_layout_get(env, obj, cl);
EXPORT_SYMBOL(cl_object_layout_get);

loff_t cl_object_maxbytes(struct cl_object *top)
        struct cl_object *obj;
        loff_t maxbytes = LLONG_MAX;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_maxbytes)
                        maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
EXPORT_SYMBOL(cl_object_maxbytes);

int cl_object_flush(const struct lu_env *env, struct cl_object *top,
                    struct ldlm_lock *lock)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_object_flush) {
                        rc = obj->co_ops->coo_object_flush(env, obj, lock);
EXPORT_SYMBOL(cl_object_flush);

int cl_object_inode_ops(const struct lu_env *env, struct cl_object *top,
                        enum coo_inode_opc opc, void *data)
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_inode_ops) {
                        rc = obj->co_ops->coo_inode_ops(env, obj, opc, data);
EXPORT_SYMBOL(cl_object_inode_ops);

void cl_req_projid_set(const struct lu_env *env, struct cl_object *top,
        struct cl_object *obj;

        cl_object_for_each(obj, top) {
                if (obj->co_ops->coo_req_projid_set)
                        obj->co_ops->coo_req_projid_set(env, obj, projid);
EXPORT_SYMBOL(cl_req_projid_set);

/**
 * Helper function removing all object locks, and marking object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
        struct cl_object_header *hdr = cl_object_header(obj);

        set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
EXPORT_SYMBOL(cl_object_kill);

void cache_stats_init(struct cache_stats *cs, const char *name)
        for (i = 0; i < CS_NR; i++)
                atomic_set(&cs->cs_stats[i], 0);

static int cache_stats_print(const struct cache_stats *cs,
                             struct seq_file *m, int h)
        /*
         *   lookup hit total cached create
         * env: ...... ...... ...... ...... ......
         */
        const char *names[CS_NR] = CS_NAMES;

        seq_printf(m, "%6s", " ");
        for (i = 0; i < CS_NR; i++)
                seq_printf(m, "%8s", names[i]);

        seq_printf(m, "%5.5s:", cs->cs_name);
        for (i = 0; i < CS_NR; i++)
                seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));

static void cl_env_percpu_refill(void);

/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
        result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
        cache_stats_init(&s->cs_pages, "pages");
        for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
                atomic_set(&s->cs_pages_state[i], 0);
        cl_env_percpu_refill();
EXPORT_SYMBOL(cl_site_init);

/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
        lu_site_fini(&s->cs_lu);
EXPORT_SYMBOL(cl_site_fini);

static struct cache_stats cl_env_stats = {
        .cs_stats = { ATOMIC_INIT(0), }

/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
        static const char *const pstate[] = {
        /*
                lookup hit total busy create
         pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
         locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
           env: ...... ...... ...... ...... ......
         */
        lu_site_stats_seq_print(&site->cs_lu, m);
        cache_stats_print(&site->cs_pages, m, 1);

        for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
                seq_printf(m, "%s: %u ", pstate[i],
                           atomic_read(&site->cs_pages_state[i]));
        seq_printf(m, "]\n");
        cache_stats_print(&cl_env_stats, m, 0);
EXPORT_SYMBOL(cl_site_stats_print);

/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

static unsigned cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
static struct cl_env_cache {
        struct list_head cec_envs;

        struct lu_context ce_ses;

        /*
         * Linkage into global list of all client environments. Used for
         * garbage collection.
         */
        struct list_head ce_linkage;
        /*
         * Debugging field: address of the caller who made the original call.
         */
static void cl_env_inc(enum cache_stats_item item)
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_inc(&cl_env_stats.cs_stats[item]);

static void cl_env_dec(enum cache_stats_item item)
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        LASSERT(atomic_read(&cl_env_stats.cs_stats[item]) > 0);
        atomic_dec(&cl_env_stats.cs_stats[item]);

static void cl_env_init0(struct cl_env *cle, void *debug)
        LASSERT(cle->ce_ref == 0);
        LASSERT(cle->ce_magic == &cl_env_init0);
        LASSERT(cle->ce_debug == NULL);

        cle->ce_debug = debug;

static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
        OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
        INIT_LIST_HEAD(&cle->ce_linkage);
        cle->ce_magic = &cl_env_init0;
        rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
        rc = lu_context_init(&cle->ce_ses,
                             LCT_SESSION | ses_tags);
        lu_context_enter(&cle->ce_ses);
        env->le_ses = &cle->ce_ses;
        cl_env_init0(cle, debug);
        OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
        cl_env_inc(CS_create);
        cl_env_inc(CS_total);
        env = ERR_PTR(-ENOMEM);

static void cl_env_fini(struct cl_env *cle)
        cl_env_dec(CS_total);
        lu_context_fini(&cle->ce_lu.le_ctx);
        lu_context_fini(&cle->ce_ses);
        OBD_SLAB_FREE_PTR(cle, cl_env_kmem);

/* Get a cl_env, either from the per-CPU cache for the current CPU, or by
 * allocating a new one.
 */
static struct lu_env *cl_env_obtain(void *debug)
        read_lock(&cl_envs[cpu].cec_guard);
        LASSERT(equi(cl_envs[cpu].cec_count == 0,
                     list_empty(&cl_envs[cpu].cec_envs)));
        if (cl_envs[cpu].cec_count > 0) {
                cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
                list_del_init(&cle->ce_linkage);
                cl_envs[cpu].cec_count--;
                read_unlock(&cl_envs[cpu].cec_guard);

                rc = lu_env_refill(env);
                cl_env_init0(cle, debug);
                lu_context_enter(&env->le_ctx);
                lu_context_enter(&cle->ce_ses);
        read_unlock(&cl_envs[cpu].cec_guard);
        env = cl_env_new(lu_context_tags_default,
                         lu_session_tags_default, debug);

static inline struct cl_env *cl_env_container(struct lu_env *env)
        return container_of(env, struct cl_env, ce_lu);

/**
 * No link to thread, this returns an env from the cache or
 * allocates a new one.
 *
 * If you need to get the specific environment you created for this thread,
 * you must either pass the pointer directly or store it in the file/inode
 * private data and retrieve it from there using ll_cl_add/ll_cl_find.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 */
struct lu_env *cl_env_get(__u16 *refcheck)
        env = cl_env_obtain(__builtin_return_address(0));
        cle = cl_env_container(env);
        *refcheck = cle->ce_ref;
        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
EXPORT_SYMBOL(cl_env_get);

/**
 * Forces an allocation of a fresh environment with given tags.
 */
struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags)
        env = cl_env_new(tags, tags, __builtin_return_address(0));
        cle = cl_env_container(env);
        *refcheck = cle->ce_ref;
        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
EXPORT_SYMBOL(cl_env_alloc);

static void cl_env_exit(struct cl_env *cle)
        lu_context_exit(&cle->ce_lu.le_ctx);
        lu_context_exit(&cle->ce_ses);

/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned cl_env_cache_purge(unsigned nr)
        for_each_possible_cpu(i) {
                write_lock(&cl_envs[i].cec_guard);
                for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
                        cle = container_of(cl_envs[i].cec_envs.next,
                                           struct cl_env, ce_linkage);
                        list_del_init(&cle->ce_linkage);
                        LASSERT(cl_envs[i].cec_count > 0);
                        cl_envs[i].cec_count--;
                        write_unlock(&cl_envs[i].cec_guard);

                write_lock(&cl_envs[i].cec_guard);
                LASSERT(equi(cl_envs[i].cec_count == 0,
                             list_empty(&cl_envs[i].cec_envs)));
                write_unlock(&cl_envs[i].cec_guard);
EXPORT_SYMBOL(cl_env_cache_purge);
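
/*
 * Usage sketch (illustrative only, helper name hypothetical): a caller that
 * wants to drop every cached environment, for example on cleanup, can pass a
 * very large count.
 */
static inline void example_purge_all_cl_envs(void)
{
        cl_env_cache_purge(~0);
}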
/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When counter drops to 0, nothing in
 * this thread is using environment and it is returned to the per-CPU cache or
 * freed immediately if the cache is full.
 */
void cl_env_put(struct lu_env *env, __u16 *refcheck)
        cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 0);
        LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));

        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
        if (--cle->ce_ref == 0) {
                cle->ce_debug = NULL;

                /*
                 * Don't bother to take a lock here.
                 *
                 * Return environment to the cache only when it was allocated
                 * with the standard tags.
                 */
                if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
                    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == lu_context_tags_default &&
                    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == lu_session_tags_default) {
                        read_lock(&cl_envs[cpu].cec_guard);
                        list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
                        cl_envs[cpu].cec_count++;
                        read_unlock(&cl_envs[cpu].cec_guard);
EXPORT_SYMBOL(cl_env_put);
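
/*
 * Usage sketch (illustrative only, helper name hypothetical): the common
 * calling pattern keeps cl_env_get() and cl_env_put() in the same lexical
 * scope and passes the same refcheck cookie to both, so a missed put is
 * caught by the reference check.
 */
static int example_with_env(void)
{
        struct lu_env *env;
        __u16 refcheck;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);
        /* ... perform cl_* calls that require an environment here ... */
        cl_env_put(env, &refcheck);
        return 0;
}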
/**
 * Converts struct cl_attr to struct ost_lvb.
 */
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
        lvb->lvb_size = attr->cat_size;
        lvb->lvb_mtime = attr->cat_mtime;
        lvb->lvb_atime = attr->cat_atime;
        lvb->lvb_ctime = attr->cat_ctime;
        lvb->lvb_blocks = attr->cat_blocks;

/**
 * Converts struct ost_lvb to struct cl_attr.
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
        attr->cat_size = lvb->lvb_size;
        attr->cat_mtime = lvb->lvb_mtime;
        attr->cat_atime = lvb->lvb_atime;
        attr->cat_ctime = lvb->lvb_ctime;
        attr->cat_blocks = lvb->lvb_blocks;
EXPORT_SYMBOL(cl_lvb2attr);

static struct cl_env cl_env_percpu[NR_CPUS];
static DEFINE_MUTEX(cl_env_percpu_mutex);

static int cl_env_percpu_init(void)
        int tags = LCT_REMEMBER | LCT_NOREF;

        for_each_possible_cpu(i) {
                rwlock_init(&cl_envs[i].cec_guard);
                INIT_LIST_HEAD(&cl_envs[i].cec_envs);
                cl_envs[i].cec_count = 0;

                cle = &cl_env_percpu[i];

                INIT_LIST_HEAD(&cle->ce_linkage);
                cle->ce_magic = &cl_env_init0;
                rc = lu_env_init(env, LCT_CL_THREAD | tags);
                rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
                lu_context_enter(&cle->ce_ses);
                env->le_ses = &cle->ce_ses;

        /* Indices 0 to i (excluding i) were correctly initialized,
         * thus we must uninitialize up to i, the rest are undefined. */
        for (j = 0; j < i; j++) {
                cle = &cl_env_percpu[j];
                lu_context_exit(&cle->ce_ses);
                lu_context_fini(&cle->ce_ses);
                lu_env_fini(&cle->ce_lu);

static void cl_env_percpu_fini(void)
        for_each_possible_cpu(i) {
                struct cl_env *cle = &cl_env_percpu[i];

                lu_context_exit(&cle->ce_ses);
                lu_context_fini(&cle->ce_ses);
                lu_env_fini(&cle->ce_lu);

static void cl_env_percpu_refill(void)
        mutex_lock(&cl_env_percpu_mutex);
        for_each_possible_cpu(i)
                lu_env_refill(&cl_env_percpu[i].ce_lu);
        mutex_unlock(&cl_env_percpu_mutex);

void cl_env_percpu_put(struct lu_env *env)
        cpu = smp_processor_id();
        cle = cl_env_container(env);
        LASSERT(cle == &cl_env_percpu[cpu]);

        LASSERT(cle->ce_ref == 0);

        cle->ce_debug = NULL;
EXPORT_SYMBOL(cl_env_percpu_put);

struct lu_env *cl_env_percpu_get(void)
        cle = &cl_env_percpu[get_cpu()];
        cl_env_init0(cle, __builtin_return_address(0));
EXPORT_SYMBOL(cl_env_percpu_get);
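
/*
 * Usage sketch (illustrative only, helper name hypothetical): the per-CPU
 * environment is meant for short, non-blocking sections. cl_env_percpu_get()
 * disables preemption via get_cpu(); the matching put_cpu() is assumed to be
 * inside cl_env_percpu_put(), so the code between get and put must stay
 * short and must not sleep.
 */
static void example_with_percpu_env(void)
{
        struct lu_env *env = cl_env_percpu_get();

        /* ... short, atomic-context use of env ... */
        cl_env_percpu_put(env);
}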
/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
                                struct lu_device_type *ldt,
                                struct lu_device *next)
        const char *typename;
        struct lu_device *d;

        typename = ldt->ldt_name;
        d = ldto_device_alloc(env, ldt, NULL);
        rc = ldto_device_init(env, d, typename, next);
        ldto_device_free(env, d);
        CERROR("can't init device '%s', %d\n", typename, rc);
        CERROR("Cannot allocate device: '%s'\n", typename);
        return lu2cl_dev(d);
EXPORT_SYMBOL(cl_type_setup);

static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
        return lu_context_key_get(&env->le_ctx, &cl_key);

/* defines cl_key_{init,fini}() */
LU_KEY_INIT_FINI(cl, struct cl_thread_info);

static struct lu_context_key cl_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = cl_key_init,
        .lct_fini = cl_key_fini,

static struct lu_kmem_descr cl_object_caches[] = {
        .ckd_cache = &cl_env_kmem,
        .ckd_name = "cl_env_kmem",
        .ckd_size = sizeof(struct cl_env)

        .ckd_cache = &cl_dio_aio_kmem,
        .ckd_name = "cl_dio_aio_kmem",
        .ckd_size = sizeof(struct cl_dio_aio)

        .ckd_cache = &cl_sub_dio_kmem,
        .ckd_name = "cl_sub_dio_kmem",
        .ckd_size = sizeof(struct cl_sub_dio)

/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
        OBD_ALLOC_PTR_ARRAY(cl_envs, num_possible_cpus());
        if (cl_envs == NULL)
                GOTO(out, result = -ENOMEM);

        result = lu_kmem_init(cl_object_caches);
                GOTO(out_envs, result);

        LU_CONTEXT_KEY_INIT(&cl_key);
        result = lu_context_key_register(&cl_key);
                GOTO(out_kmem, result);

        result = cl_env_percpu_init();
        if (result) /* no cl_env_percpu_fini on error */
                GOTO(out_keys, result);

        lu_context_key_degister(&cl_key);
        lu_kmem_fini(cl_object_caches);
        OBD_FREE_PTR_ARRAY(cl_envs, num_possible_cpus());

/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
        for (i = 0; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
                if (cl_page_kmem_array[i]) {
                        kmem_cache_destroy(cl_page_kmem_array[i]);
                        cl_page_kmem_array[i] = NULL;

        cl_env_percpu_fini();
        lu_context_key_degister(&cl_key);
        lu_kmem_fini(cl_object_caches);
        OBD_FREE_PTR_ARRAY(cl_envs, num_possible_cpus());
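
/*
 * Usage sketch (illustrative only): cl_global_init() is paired with
 * cl_global_fini() by the caller; the hypothetical setup/cleanup helpers
 * below only show that pairing and are not part of the original file.
 */
static int example_client_setup(void)
{
        int rc;

        rc = cl_global_init();
        if (rc != 0)
                return rc;
        /* ... register the rest of the client stack here ... */
        return 0;
}

static void example_client_cleanup(void)
{
        cl_global_fini();
}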