1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lu_object.c
39 * These are the only exported functions; they provide some generic
40 * infrastructure for managing object devices.
42 * Author: Nikita Danilov <nikita.danilov@sun.com>
45 #define DEBUG_SUBSYSTEM S_CLASS
47 # define EXPORT_SYMTAB
50 #include <libcfs/libcfs.h>
53 # include <linux/module.h>
57 #include <libcfs/libcfs_hash.h>
58 #include <obd_class.h>
59 #include <obd_support.h>
60 #include <lustre_disk.h>
61 #include <lustre_fid.h>
62 #include <lu_object.h>
63 #include <libcfs/list.h>
64 /* lu_time_global_{init,fini}() */
67 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
70 * Decrease reference counter on object. If last reference is freed, return
71 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
72 * case, free object immediately.
74 void lu_object_put(const struct lu_env *env, struct lu_object *o)
76 struct lu_object_header *top;
78 struct lu_object *orig;
82 site = o->lo_dev->ld_site;
85 cfs_write_lock(&site->ls_guard);
86 if (cfs_atomic_dec_and_test(&top->loh_ref)) {
88 * When last reference is released, iterate over object
89 * layers, and notify them that object is no longer busy.
91 cfs_list_for_each_entry_reverse(o, &top->loh_layers,
93 if (o->lo_ops->loo_object_release != NULL)
94 o->lo_ops->loo_object_release(env, o);
97 if (lu_object_is_dying(top)) {
99 * If object is dying (will not be cached), remove it
100 * from hash table and LRU.
102 * This is done with hash table and LRU lists
103 * locked. As the only way to acquire first reference
104 * to previously unreferenced object is through
105 * hash-table lookup (lu_object_find()), or LRU
106 * scanning (lu_site_purge()), which are done under
107 * hash-table and LRU lock, no race with concurrent
108 * object lookup is possible and we can safely destroy
111 cfs_hlist_del_init(&top->loh_hash);
112 cfs_list_del_init(&top->loh_lru);
117 cfs_write_unlock(&site->ls_guard);
120 * Object was already removed from hash and lru above, can
123 lu_object_free(env, orig);
125 EXPORT_SYMBOL(lu_object_put);
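/*
 * Illustrative usage sketch (hypothetical caller, excluded from the build):
 * every reference obtained through lu_object_find() must eventually be
 * dropped with exactly one lu_object_put().  "dev" and "fid" stand in for a
 * real device and file identifier.
 */
#if 0
static int example_lookup_and_put(const struct lu_env *env,
                                  struct lu_device *dev,
                                  const struct lu_fid *fid)
{
        struct lu_object *o;

        o = lu_object_find(env, dev, fid, NULL);
        if (IS_ERR(o))
                return PTR_ERR(o);
        /* ... use the object ... */
        lu_object_put(env, o); /* last put caches or frees the object */
        return 0;
}
#endif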
128 * Allocate new object.
130 * This follows object creation protocol, described in the comment within
131 * struct lu_device_operations definition.
133 static struct lu_object *lu_object_alloc(const struct lu_env *env,
134 struct lu_device *dev,
135 const struct lu_fid *f,
136 const struct lu_object_conf *conf)
138 struct lu_object *scan;
139 struct lu_object *top;
146 * Create top-level object slice. This will also create
149 top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
151 RETURN(ERR_PTR(-ENOMEM));
153 * This is the only place where object fid is assigned. It's constant
156 LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
157 top->lo_header->loh_fid = *f;
158 layers = &top->lo_header->loh_layers;
161 * Call ->loo_object_init() repeatedly, until no more new
162 * object slices are created.
165 cfs_list_for_each_entry(scan, layers, lo_linkage) {
166 if (scan->lo_flags & LU_OBJECT_ALLOCATED)
169 scan->lo_header = top->lo_header;
170 result = scan->lo_ops->loo_object_init(env, scan, conf);
172 lu_object_free(env, top);
173 RETURN(ERR_PTR(result));
175 scan->lo_flags |= LU_OBJECT_ALLOCATED;
179 cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
180 if (scan->lo_ops->loo_object_start != NULL) {
181 result = scan->lo_ops->loo_object_start(env, scan);
183 lu_object_free(env, top);
184 RETURN(ERR_PTR(result));
189 dev->ld_site->ls_stats.s_created ++;
196 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
199 struct lu_object *scan;
200 struct lu_site *site;
203 site = o->lo_dev->ld_site;
204 layers = &o->lo_header->loh_layers;
206 * First call ->loo_object_delete() method to release all resources.
208 cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
209 if (scan->lo_ops->loo_object_delete != NULL)
210 scan->lo_ops->loo_object_delete(env, scan);
214 * Then, splice object layers into stand-alone list, and call
215 * ->loo_object_free() on all layers to free memory. Splice is
216 * necessary, because lu_object_header is freed together with the
219 CFS_INIT_LIST_HEAD(&splice);
220 cfs_list_splice_init(layers, &splice);
221 while (!cfs_list_empty(&splice)) {
223 * Free layers in bottom-to-top order, so that object header
224 * lives as long as possible and ->loo_object_free() methods
225 * can look at its contents.
227 o = container_of0(splice.prev, struct lu_object, lo_linkage);
228 cfs_list_del_init(&o->lo_linkage);
229 LASSERT(o->lo_ops->loo_object_free != NULL);
230 o->lo_ops->loo_object_free(env, o);
232 cfs_waitq_broadcast(&site->ls_marche_funebre);
236 * Free \a nr objects from the cold end of the site LRU list.
238 int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
241 struct lu_object_header *h;
242 struct lu_object_header *temp;
244 CFS_INIT_LIST_HEAD(&dispose);
246 * Under LRU list lock, scan LRU list and move unreferenced objects to
247 * the dispose list, removing them from LRU and hash table.
249 cfs_write_lock(&s->ls_guard);
250 cfs_list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
252 * Objects are sorted in LRU order, and "busy" objects (ones
253 * with h->loh_ref > 0) naturally tend to live near the hot end
254 * that we scan last. Unfortunately, sites usually have a small
255 * (less than ten) number of busy yet rarely accessed objects
256 * (some global objects, accessed directly through pointers and
257 * bypassing the hash table). Currently the algorithm scans them
258 * over and over again; probably we should move busy objects out
259 * of the LRU, or we can live with that.
263 if (cfs_atomic_read(&h->loh_ref) > 0)
265 cfs_hlist_del_init(&h->loh_hash);
266 cfs_list_move(&h->loh_lru, &dispose);
269 cfs_write_unlock(&s->ls_guard);
271 * Free everything on the dispose list. This is safe against races due
272 * to the reasons described in lu_object_put().
274 while (!cfs_list_empty(&dispose)) {
275 h = container_of0(dispose.next,
276 struct lu_object_header, loh_lru);
277 cfs_list_del_init(&h->loh_lru);
278 lu_object_free(env, lu_object_top(h));
279 s->ls_stats.s_lru_purged ++;
283 EXPORT_SYMBOL(lu_site_purge);
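/*
 * Illustrative sketch (hypothetical caller, excluded from the build): how a
 * memory-pressure path asks a site to shed objects.  As lu_cache_shrink()
 * below relies on, the return value is the unfreed remainder of the request;
 * passing ~0 drains every unreferenced object.
 */
#if 0
static int example_shed_objects(const struct lu_env *env,
                                struct lu_site *site, int nr)
{
        int remain;

        remain = lu_site_purge(env, site, nr);
        return remain;
}
#endif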
288 * Code below has to jump through certain hoops to output object description
289 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
290 * composes object description from strings that are parts of _lines_ of
291 * output (i.e., strings that are not terminated by newline). This doesn't fit
292 * very well into libcfs_debug_msg() interface that assumes that each message
293 * supplied to it is a self-contained output line.
295 * To work around this, strings are collected in a temporary buffer
296 * (implemented as a value of lu_global_key), until a terminating newline
297 * character is detected.
305 * XXX overflow is not handled correctly.
310 struct lu_cdebug_data {
314 char lck_area[LU_CDEBUG_LINE];
317 /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
318 LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
321 * Key, holding temporary buffer. This key is registered very early by
324 struct lu_context_key lu_global_key = {
325 .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
326 .lct_init = lu_global_key_init,
327 .lct_fini = lu_global_key_fini
331 * Printer function emitting messages through libcfs_debug_msg().
333 int lu_cdebug_printer(const struct lu_env *env,
334 void *cookie, const char *format, ...)
336 struct lu_cdebug_print_info *info = cookie;
337 struct lu_cdebug_data *key;
342 va_start(args, format);
344 key = lu_context_key_get(&env->le_ctx, &lu_global_key);
345 LASSERT(key != NULL);
347 used = strlen(key->lck_area);
348 complete = format[strlen(format) - 1] == '\n';
350 * Append new chunk to the buffer.
352 vsnprintf(key->lck_area + used,
353 ARRAY_SIZE(key->lck_area) - used, format, args);
355 if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
356 libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
357 (char *)info->lpi_file, info->lpi_fn,
358 info->lpi_line, "%s", key->lck_area);
359 key->lck_area[0] = 0;
364 EXPORT_SYMBOL(lu_cdebug_printer);
367 * Print object header.
369 void lu_object_header_print(const struct lu_env *env, void *cookie,
370 lu_printer_t printer,
371 const struct lu_object_header *hdr)
373 (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
374 hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
376 cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
377 cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? "" : " lru",
379 hdr->loh_attr & LOHA_EXISTS ? " exist":"");
381 EXPORT_SYMBOL(lu_object_header_print);
384 * Print a human-readable representation of \a o to \a printer.
386 void lu_object_print(const struct lu_env *env, void *cookie,
387 lu_printer_t printer, const struct lu_object *o)
389 static const char ruler[] = "........................................";
390 struct lu_object_header *top;
394 lu_object_header_print(env, cookie, printer, top);
395 (*printer)(env, cookie, "{ \n");
396 cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
397 depth = o->lo_depth + 4;
400 * print `.' \a depth times followed by type name and address
402 (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
403 o->lo_dev->ld_type->ldt_name, o);
404 if (o->lo_ops->loo_object_print != NULL)
405 o->lo_ops->loo_object_print(env, cookie, printer, o);
406 (*printer)(env, cookie, "\n");
408 (*printer)(env, cookie, "} header@%p\n", top);
410 EXPORT_SYMBOL(lu_object_print);
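/*
 * Illustrative sketch (hypothetical caller, excluded from the build): dumping
 * an object into the debug log with the same cookie/printer pair that
 * lu_stack_fini() uses further down in this file.
 */
#if 0
static void example_dump_object(const struct lu_env *env,
                                const struct lu_object *o)
{
        static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_INFO);

        lu_object_print(env, &cookie, lu_cdebug_printer, o);
}
#endif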
413 * Check object consistency.
415 int lu_object_invariant(const struct lu_object *o)
417 struct lu_object_header *top;
420 cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
421 if (o->lo_ops->loo_object_invariant != NULL &&
422 !o->lo_ops->loo_object_invariant(o))
427 EXPORT_SYMBOL(lu_object_invariant);
429 static struct lu_object *htable_lookup(struct lu_site *s,
430 const cfs_hlist_head_t *bucket,
431 const struct lu_fid *f,
432 cfs_waitlink_t *waiter)
434 struct lu_object_header *h;
435 cfs_hlist_node_t *scan;
437 cfs_hlist_for_each_entry(h, scan, bucket, loh_hash) {
438 s->ls_stats.s_cache_check ++;
439 if (likely(lu_fid_eq(&h->loh_fid, f))) {
440 if (unlikely(lu_object_is_dying(h))) {
442 * Lookup found an object being destroyed;
443 * this object cannot be returned (to assure
444 * that references to dying objects are
445 * eventually drained), and moreover, lookup
446 * has to wait until object is freed.
448 cfs_waitlink_init(waiter);
449 cfs_waitq_add(&s->ls_marche_funebre, waiter);
450 cfs_set_current_state(CFS_TASK_UNINT);
451 s->ls_stats.s_cache_death_race ++;
452 return ERR_PTR(-EAGAIN);
454 /* bump reference count... */
455 if (cfs_atomic_add_return(1, &h->loh_ref) == 1)
457 /* and move to the head of the LRU */
459 * XXX temporarily disabled to measure the effects of
460 * read-write locking.
462 /* list_move_tail(&h->loh_lru, &s->ls_lru); */
463 s->ls_stats.s_cache_hit ++;
464 return lu_object_top(h);
467 s->ls_stats.s_cache_miss ++;
471 static __u32 fid_hash(const struct lu_fid *f, int bits)
473 /* all objects with the same id and different versions will belong to the
474 * same collision list. */
475 return cfs_hash_long(fid_flatten(f), bits);
479 * Search cache for an object with the fid \a f. If such object is found,
480 * return it. Otherwise, create new object, insert it into cache and return
481 * it. In any case, additional reference is acquired on the returned object.
483 struct lu_object *lu_object_find(const struct lu_env *env,
484 struct lu_device *dev, const struct lu_fid *f,
485 const struct lu_object_conf *conf)
487 return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
489 EXPORT_SYMBOL(lu_object_find);
492 * Core logic of lu_object_find*() functions.
494 static struct lu_object *lu_object_find_try(const struct lu_env *env,
495 struct lu_device *dev,
496 const struct lu_fid *f,
497 const struct lu_object_conf *conf,
498 cfs_waitlink_t *waiter)
502 struct lu_object *shadow;
503 cfs_hlist_head_t *bucket;
506 * This uses standard index maintenance protocol:
508 * - search index under lock, and return object if found;
509 * - otherwise, unlock index, allocate new object;
510 * - lock index and search again;
511 * - if nothing is found (usual case), insert newly created
513 * - otherwise (race: other thread inserted object), free
514 * object just allocated.
518 * If dying object is found during index search, add @waiter to the
519 * site wait-queue and return ERR_PTR(-EAGAIN).
523 bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);
525 cfs_read_lock(&s->ls_guard);
526 o = htable_lookup(s, bucket, f, waiter);
527 cfs_read_unlock(&s->ls_guard);
533 * Allocate new object. This may result in rather complicated
534 * operations, including fld queries, inode loading, etc.
536 o = lu_object_alloc(env, dev, f, conf);
537 if (unlikely(IS_ERR(o)))
540 LASSERT(lu_fid_eq(lu_object_fid(o), f));
542 cfs_write_lock(&s->ls_guard);
543 shadow = htable_lookup(s, bucket, f, waiter);
544 if (likely(shadow == NULL)) {
545 cfs_hlist_add_head(&o->lo_header->loh_hash, bucket);
546 cfs_list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
552 s->ls_stats.s_cache_race ++;
553 cfs_write_unlock(&s->ls_guard);
555 lu_object_free(env, o);
560 * Much like lu_object_find(), but top level device of object is specifically
561 * \a dev rather than top level device of the site. This interface allows
562 * objects of different "stacking" to be created within the same site.
564 struct lu_object *lu_object_find_at(const struct lu_env *env,
565 struct lu_device *dev,
566 const struct lu_fid *f,
567 const struct lu_object_conf *conf)
569 struct lu_object *obj;
573 obj = lu_object_find_try(env, dev, f, conf, &wait);
574 if (obj == ERR_PTR(-EAGAIN)) {
576 * lu_object_find_try() already added waiter into the
579 cfs_waitq_wait(&wait, CFS_TASK_UNINT);
580 cfs_waitq_del(&dev->ld_site->ls_marche_funebre, &wait);
586 EXPORT_SYMBOL(lu_object_find_at);
589 * Find object with given fid, and return its slice belonging to given device.
591 struct lu_object *lu_object_find_slice(const struct lu_env *env,
592 struct lu_device *dev,
593 const struct lu_fid *f,
594 const struct lu_object_conf *conf)
596 struct lu_object *top;
597 struct lu_object *obj;
599 top = lu_object_find(env, dev, f, conf);
601 obj = lu_object_locate(top->lo_header, dev->ld_type);
603 lu_object_put(env, top);
608 EXPORT_SYMBOL(lu_object_find_slice);
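/*
 * Illustrative sketch (hypothetical caller, excluded from the build): fetching
 * the slice of a compound object that belongs to the layer of \a dev, then
 * dropping the reference through the slice.  Error handling here is a guess
 * at the usual conventions rather than a statement of them.
 */
#if 0
static int example_use_slice(const struct lu_env *env, struct lu_device *dev,
                             const struct lu_fid *fid)
{
        struct lu_object *slice;

        slice = lu_object_find_slice(env, dev, fid, NULL);
        if (slice == NULL || IS_ERR(slice))
                return slice == NULL ? -ENOENT : PTR_ERR(slice);
        /* slice->lo_dev->ld_type == dev->ld_type at this point */
        lu_object_put(env, slice);
        return 0;
}
#endif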
611 * Global list of all device types.
613 static CFS_LIST_HEAD(lu_device_types);
615 int lu_device_type_init(struct lu_device_type *ldt)
619 CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
620 result = ldt->ldt_ops->ldto_init(ldt);
622 cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
625 EXPORT_SYMBOL(lu_device_type_init);
627 void lu_device_type_fini(struct lu_device_type *ldt)
629 cfs_list_del_init(&ldt->ldt_linkage);
630 ldt->ldt_ops->ldto_fini(ldt);
632 EXPORT_SYMBOL(lu_device_type_fini);
634 void lu_types_stop(void)
636 struct lu_device_type *ldt;
638 cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
639 if (ldt->ldt_device_nr == 0)
640 ldt->ldt_ops->ldto_stop(ldt);
643 EXPORT_SYMBOL(lu_types_stop);
646 * Global list of all sites on this node
648 static CFS_LIST_HEAD(lu_sites);
649 static CFS_DECLARE_MUTEX(lu_sites_guard);
652 * Global environment used by site shrinker.
654 static struct lu_env lu_shrink_env;
657 * Print all objects in \a s.
659 void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
660 lu_printer_t printer)
664 for (i = 0; i < s->ls_hash_size; ++i) {
665 struct lu_object_header *h;
666 cfs_hlist_node_t *scan;
668 cfs_read_lock(&s->ls_guard);
669 cfs_hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
671 if (!cfs_list_empty(&h->loh_layers)) {
672 const struct lu_object *obj;
674 obj = lu_object_top(h);
675 lu_object_print(env, cookie, printer, obj);
677 lu_object_header_print(env, cookie, printer, h);
679 cfs_read_unlock(&s->ls_guard);
682 EXPORT_SYMBOL(lu_site_print);
685 LU_CACHE_PERCENT = 20,
689 * Return desired hash table order.
691 static int lu_htable_order(void)
693 unsigned long cache_size;
697 * Calculate hash table size, assuming that we want reasonable
698 * performance when 20% of total memory is occupied by cache of
701 * Size of lu_object is (arbitrarily) taken as 1K (together with inode).
703 cache_size = cfs_num_physpages;
705 #if BITS_PER_LONG == 32
706 /* limit hashtable size for lowmem systems to low RAM */
707 if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
708 cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
711 cache_size = cache_size / 100 * LU_CACHE_PERCENT *
712 (CFS_PAGE_SIZE / 1024);
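        /*
         * Worked example of the sizing above (hypothetical 64-bit node, only
         * to illustrate the arithmetic): with 4GB of RAM and 4K pages,
         * cfs_num_physpages == 2^20, so cache_size becomes roughly
         * 2^20 / 100 * 20 * (4096 / 1024) ~= 840000 cacheable 1K objects,
         * and the loop below settles on bits == 20, i.e. a 2^20-entry table.
         */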
714 for (bits = 1; (1 << bits) < cache_size; ++bits) {
720 static cfs_lock_class_key_t lu_site_guard_class;
723 * Initialize site \a s, with \a d as the top level device.
725 int lu_site_init(struct lu_site *s, struct lu_device *top)
732 memset(s, 0, sizeof *s);
733 cfs_rwlock_init(&s->ls_guard);
734 cfs_lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
735 CFS_INIT_LIST_HEAD(&s->ls_lru);
736 CFS_INIT_LIST_HEAD(&s->ls_linkage);
737 cfs_waitq_init(&s->ls_marche_funebre);
741 lu_ref_add(&top->ld_reference, "site-top", s);
743 for (bits = lu_htable_order(), size = 1 << bits;
745 cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL;
746 --bits, size >>= 1) {
748 * Scale hash table down, until allocation succeeds.
753 s->ls_hash_size = size;
754 s->ls_hash_bits = bits;
755 s->ls_hash_mask = size - 1;
757 for (i = 0; i < size; i++)
758 CFS_INIT_HLIST_HEAD(&s->ls_hash[i]);
762 EXPORT_SYMBOL(lu_site_init);
765 * Finalize \a s and release its resources.
767 void lu_site_fini(struct lu_site *s)
769 LASSERT(cfs_list_empty(&s->ls_lru));
770 LASSERT(s->ls_total == 0);
772 cfs_down(&lu_sites_guard);
773 cfs_list_del_init(&s->ls_linkage);
774 cfs_up(&lu_sites_guard);
776 if (s->ls_hash != NULL) {
778 for (i = 0; i < s->ls_hash_size; i++)
779 LASSERT(cfs_hlist_empty(&s->ls_hash[i]));
780 cfs_free_large(s->ls_hash);
783 if (s->ls_top_dev != NULL) {
784 s->ls_top_dev->ld_site = NULL;
785 lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
786 lu_device_put(s->ls_top_dev);
787 s->ls_top_dev = NULL;
790 EXPORT_SYMBOL(lu_site_fini);
793 * Called when initialization of stack for this site is completed.
795 int lu_site_init_finish(struct lu_site *s)
798 cfs_down(&lu_sites_guard);
799 result = lu_context_refill(&lu_shrink_env.le_ctx);
801 cfs_list_add(&s->ls_linkage, &lu_sites);
802 cfs_up(&lu_sites_guard);
805 EXPORT_SYMBOL(lu_site_init_finish);
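/*
 * Illustrative sketch (hypothetical caller, excluded from the build): the
 * usual ordering of site setup calls.  lu_site_init_finish() is invoked once
 * the device stack under the site is fully configured.
 */
#if 0
static int example_site_setup(struct lu_site *site, struct lu_device *topdev)
{
        int rc;

        rc = lu_site_init(site, topdev);
        if (rc != 0)
                return rc;
        /* ... assemble the rest of the device stack ... */
        rc = lu_site_init_finish(site);
        if (rc != 0)
                lu_site_fini(site);
        return rc;
}
#endif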
808 * Acquire additional reference on device \a d
810 void lu_device_get(struct lu_device *d)
812 cfs_atomic_inc(&d->ld_ref);
814 EXPORT_SYMBOL(lu_device_get);
817 * Release reference on device \a d.
819 void lu_device_put(struct lu_device *d)
821 LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
822 cfs_atomic_dec(&d->ld_ref);
824 EXPORT_SYMBOL(lu_device_put);
827 * Initialize device \a d of type \a t.
829 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
831 if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
832 t->ldt_ops->ldto_start(t);
833 memset(d, 0, sizeof *d);
834 cfs_atomic_set(&d->ld_ref, 0);
836 lu_ref_init(&d->ld_reference);
839 EXPORT_SYMBOL(lu_device_init);
842 * Finalize device \a d.
844 void lu_device_fini(struct lu_device *d)
846 struct lu_device_type *t;
849 if (d->ld_obd != NULL) {
850 d->ld_obd->obd_lu_dev = NULL;
854 lu_ref_fini(&d->ld_reference);
855 LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
856 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
857 LASSERT(t->ldt_device_nr > 0);
858 if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
859 t->ldt_ops->ldto_stop(t);
861 EXPORT_SYMBOL(lu_device_fini);
864 * Initialize object \a o that is part of compound object \a h and was created
867 int lu_object_init(struct lu_object *o,
868 struct lu_object_header *h, struct lu_device *d)
870 memset(o, 0, sizeof *o);
874 o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
875 CFS_INIT_LIST_HEAD(&o->lo_linkage);
878 EXPORT_SYMBOL(lu_object_init);
881 * Finalize object and release its resources.
883 void lu_object_fini(struct lu_object *o)
885 struct lu_device *dev = o->lo_dev;
887 LASSERT(cfs_list_empty(&o->lo_linkage));
890 lu_ref_del_at(&dev->ld_reference,
891 o->lo_dev_ref, "lu_object", o);
896 EXPORT_SYMBOL(lu_object_fini);
899 * Add object \a o as first layer of compound object \a h
901 * This is typically called by the ->ldo_object_alloc() method of top-level
904 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
906 cfs_list_move(&o->lo_linkage, &h->loh_layers);
908 EXPORT_SYMBOL(lu_object_add_top);
911 * Add object \a o as a layer of compound object, going after \a before.
913 * This is typically called by the ->ldo_object_alloc() method of \a
916 void lu_object_add(struct lu_object *before, struct lu_object *o)
918 cfs_list_move(&o->lo_linkage, &before->lo_linkage);
920 EXPORT_SYMBOL(lu_object_add);
923 * Initialize compound object.
925 int lu_object_header_init(struct lu_object_header *h)
927 memset(h, 0, sizeof *h);
928 cfs_atomic_set(&h->loh_ref, 1);
929 CFS_INIT_HLIST_NODE(&h->loh_hash);
930 CFS_INIT_LIST_HEAD(&h->loh_lru);
931 CFS_INIT_LIST_HEAD(&h->loh_layers);
932 lu_ref_init(&h->loh_reference);
935 EXPORT_SYMBOL(lu_object_header_init);
938 * Finalize compound object.
940 void lu_object_header_fini(struct lu_object_header *h)
942 LASSERT(cfs_list_empty(&h->loh_layers));
943 LASSERT(cfs_list_empty(&h->loh_lru));
944 LASSERT(cfs_hlist_unhashed(&h->loh_hash));
945 lu_ref_fini(&h->loh_reference);
947 EXPORT_SYMBOL(lu_object_header_fini);
950 * Given a compound object, find its slice, corresponding to the device type
953 struct lu_object *lu_object_locate(struct lu_object_header *h,
954 const struct lu_device_type *dtype)
958 cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
959 if (o->lo_dev->ld_type == dtype)
964 EXPORT_SYMBOL(lu_object_locate);
969 * Finalize and free devices in the device stack.
971 * Finalize device stack by purging object cache, and calling
972 * lu_device_type_operations::ldto_device_fini() and
973 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
975 void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
977 struct lu_site *site = top->ld_site;
978 struct lu_device *scan;
979 struct lu_device *next;
981 lu_site_purge(env, site, ~0);
982 for (scan = top; scan != NULL; scan = next) {
983 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
984 lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
989 lu_site_purge(env, site, ~0);
991 if (!cfs_list_empty(&site->ls_lru) || site->ls_total != 0) {
993 * Uh-oh, objects still exist.
995 static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
997 lu_site_print(env, site, &cookie, lu_cdebug_printer);
1000 for (scan = top; scan != NULL; scan = next) {
1001 const struct lu_device_type *ldt = scan->ld_type;
1002 struct obd_type *type;
1004 next = ldt->ldt_ops->ldto_device_free(env, scan);
1005 type = ldt->ldt_obd_type;
1008 class_put_type(type);
1012 EXPORT_SYMBOL(lu_stack_fini);
1016 * Maximal number of tld slots.
1018 LU_CONTEXT_KEY_NR = 32
1021 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1023 static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;
1026 * Global counter incremented whenever key is registered, unregistered,
1027 * revived or quiesced. This is used to avoid unnecessary calls to
1028 * lu_context_refill(). No locking is provided, as initialization and shutdown
1029 * are supposed to be externally serialized.
1031 static unsigned key_set_version = 0;
1036 int lu_context_key_register(struct lu_context_key *key)
1041 LASSERT(key->lct_init != NULL);
1042 LASSERT(key->lct_fini != NULL);
1043 LASSERT(key->lct_tags != 0);
1044 LASSERT(key->lct_owner != NULL);
1047 cfs_spin_lock(&lu_keys_guard);
1048 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1049 if (lu_keys[i] == NULL) {
1051 cfs_atomic_set(&key->lct_used, 1);
1053 lu_ref_init(&key->lct_reference);
1059 cfs_spin_unlock(&lu_keys_guard);
1062 EXPORT_SYMBOL(lu_context_key_register);
1064 static void key_fini(struct lu_context *ctx, int index)
1066 if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
1067 struct lu_context_key *key;
1069 key = lu_keys[index];
1070 LASSERT(key != NULL);
1071 LASSERT(key->lct_fini != NULL);
1072 LASSERT(cfs_atomic_read(&key->lct_used) > 1);
1074 key->lct_fini(ctx, key, ctx->lc_value[index]);
1075 lu_ref_del(&key->lct_reference, "ctx", ctx);
1076 cfs_atomic_dec(&key->lct_used);
1077 LASSERT(key->lct_owner != NULL);
1078 if (!(ctx->lc_tags & LCT_NOREF)) {
1079 LASSERT(cfs_module_refcount(key->lct_owner) > 0);
1080 cfs_module_put(key->lct_owner);
1082 ctx->lc_value[index] = NULL;
1089 void lu_context_key_degister(struct lu_context_key *key)
1091 LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
1092 LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1094 lu_context_key_quiesce(key);
1097 cfs_spin_lock(&lu_keys_guard);
1098 key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1099 if (lu_keys[key->lct_index]) {
1100 lu_keys[key->lct_index] = NULL;
1101 lu_ref_fini(&key->lct_reference);
1103 cfs_spin_unlock(&lu_keys_guard);
1105 LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
1106 "key has instances: %d\n",
1107 cfs_atomic_read(&key->lct_used));
1109 EXPORT_SYMBOL(lu_context_key_degister);
1112 * Register a number of keys. This has to be called after all keys have been
1113 * initialized by a call to LU_CONTEXT_KEY_INIT().
1115 int lu_context_key_register_many(struct lu_context_key *k, ...)
1117 struct lu_context_key *key = k;
1123 result = lu_context_key_register(key);
1126 key = va_arg(args, struct lu_context_key *);
1127 } while (key != NULL);
1133 lu_context_key_degister(k);
1134 k = va_arg(args, struct lu_context_key *);
1141 EXPORT_SYMBOL(lu_context_key_register_many);
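/*
 * Illustrative sketch (hypothetical module code, excluded from the build):
 * registering a group of keys at module init and releasing them at exit.
 * "foo_key" and "bar_key" are assumed to have been prepared with
 * LU_CONTEXT_KEY_INIT(); the variadic lists must be NULL-terminated.
 */
#if 0
static int example_module_keys_init(void)
{
        return lu_context_key_register_many(&foo_key, &bar_key, NULL);
}

static void example_module_keys_fini(void)
{
        lu_context_key_degister_many(&foo_key, &bar_key, NULL);
}
#endif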
1144 * De-register a number of keys. This is a dual to
1145 * lu_context_key_register_many().
1147 void lu_context_key_degister_many(struct lu_context_key *k, ...)
1153 lu_context_key_degister(k);
1154 k = va_arg(args, struct lu_context_key*);
1155 } while (k != NULL);
1158 EXPORT_SYMBOL(lu_context_key_degister_many);
1161 * Revive a number of keys.
1163 void lu_context_key_revive_many(struct lu_context_key *k, ...)
1169 lu_context_key_revive(k);
1170 k = va_arg(args, struct lu_context_key*);
1171 } while (k != NULL);
1174 EXPORT_SYMBOL(lu_context_key_revive_many);
1177 * Quiesce a number of keys.
1179 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1185 lu_context_key_quiesce(k);
1186 k = va_arg(args, struct lu_context_key*);
1187 } while (k != NULL);
1190 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1193 * Return value associated with key \a key in context \a ctx.
1195 void *lu_context_key_get(const struct lu_context *ctx,
1196 const struct lu_context_key *key)
1198 LINVRNT(ctx->lc_state == LCS_ENTERED);
1199 LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1200 LASSERT(lu_keys[key->lct_index] == key);
1201 return ctx->lc_value[key->lct_index];
1203 EXPORT_SYMBOL(lu_context_key_get);
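/*
 * Illustrative sketch (hypothetical key, excluded from the build): a module
 * private per-context value, declared in the same way as lu_global_key above
 * and fetched with lu_context_key_get().  All "foo" names are made up.
 */
#if 0
struct foo_thread_info {
        struct lu_fid fti_fid;
};

/* generates foo_key_init()/foo_key_fini() value constructor and destructor */
LU_KEY_INIT_FINI(foo, struct foo_thread_info);

static struct lu_context_key foo_key = {
        .lct_tags = LCT_MD_THREAD,
        .lct_init = foo_key_init,
        .lct_fini = foo_key_fini
};

static struct foo_thread_info *foo_env_info(const struct lu_env *env)
{
        struct foo_thread_info *info;

        info = lu_context_key_get(&env->le_ctx, &foo_key);
        LASSERT(info != NULL);
        return info;
}

/* at module init: LU_CONTEXT_KEY_INIT(&foo_key); lu_context_key_register(&foo_key); */
#endif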
1206 * List of remembered contexts. XXX document me.
1208 static CFS_LIST_HEAD(lu_context_remembered);
1211 * Destroy \a key in all remembered contexts. This is used to destroy key
1212 * values in "shared" contexts (like service threads), when a module owning
1213 * the key is about to be unloaded.
1215 void lu_context_key_quiesce(struct lu_context_key *key)
1217 struct lu_context *ctx;
1218 extern unsigned cl_env_cache_purge(unsigned nr);
1220 if (!(key->lct_tags & LCT_QUIESCENT)) {
1222 * XXX layering violation.
1224 cl_env_cache_purge(~0);
1225 key->lct_tags |= LCT_QUIESCENT;
1227 * XXX memory barrier has to go here.
1229 cfs_spin_lock(&lu_keys_guard);
1230 cfs_list_for_each_entry(ctx, &lu_context_remembered,
1232 key_fini(ctx, key->lct_index);
1233 cfs_spin_unlock(&lu_keys_guard);
1237 EXPORT_SYMBOL(lu_context_key_quiesce);
1239 void lu_context_key_revive(struct lu_context_key *key)
1241 key->lct_tags &= ~LCT_QUIESCENT;
1244 EXPORT_SYMBOL(lu_context_key_revive);
1246 static void keys_fini(struct lu_context *ctx)
1250 cfs_spin_lock(&lu_keys_guard);
1251 if (ctx->lc_value != NULL) {
1252 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1254 OBD_FREE(ctx->lc_value,
1255 ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1256 ctx->lc_value = NULL;
1258 cfs_spin_unlock(&lu_keys_guard);
1261 static int keys_fill(struct lu_context *ctx)
1265 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1266 struct lu_context_key *key;
1269 if (ctx->lc_value[i] == NULL && key != NULL &&
1270 (key->lct_tags & ctx->lc_tags) &&
1272 * Don't create values for an LCT_QUIESCENT key, as this
1273 * would pin the module owning the key.
1275 !(key->lct_tags & LCT_QUIESCENT)) {
1278 LINVRNT(key->lct_init != NULL);
1279 LINVRNT(key->lct_index == i);
1281 value = key->lct_init(ctx, key);
1282 if (unlikely(IS_ERR(value)))
1283 return PTR_ERR(value);
1285 LASSERT(key->lct_owner != NULL);
1286 if (!(ctx->lc_tags & LCT_NOREF))
1287 cfs_try_module_get(key->lct_owner);
1288 lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1289 cfs_atomic_inc(&key->lct_used);
1291 * This is the only place in the code where an
1292 * element of the ctx->lc_value[] array is set to non-NULL
1295 ctx->lc_value[i] = value;
1296 if (key->lct_exit != NULL)
1297 ctx->lc_tags |= LCT_HAS_EXIT;
1299 ctx->lc_version = key_set_version;
1304 static int keys_init(struct lu_context *ctx)
1308 OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1309 if (likely(ctx->lc_value != NULL))
1310 result = keys_fill(ctx);
1320 * Initialize context data-structure. Create values for all keys.
1322 int lu_context_init(struct lu_context *ctx, __u32 tags)
1324 memset(ctx, 0, sizeof *ctx);
1325 ctx->lc_state = LCS_INITIALIZED;
1326 ctx->lc_tags = tags;
1327 if (tags & LCT_REMEMBER) {
1328 cfs_spin_lock(&lu_keys_guard);
1329 cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
1330 cfs_spin_unlock(&lu_keys_guard);
1332 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
1333 return keys_init(ctx);
1335 EXPORT_SYMBOL(lu_context_init);
1338 * Finalize context data-structure. Destroy key values.
1340 void lu_context_fini(struct lu_context *ctx)
1342 LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1343 ctx->lc_state = LCS_FINALIZED;
1345 cfs_spin_lock(&lu_keys_guard);
1346 cfs_list_del_init(&ctx->lc_remember);
1347 cfs_spin_unlock(&lu_keys_guard);
1349 EXPORT_SYMBOL(lu_context_fini);
1352 * Called before entering context.
1354 void lu_context_enter(struct lu_context *ctx)
1356 LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1357 ctx->lc_state = LCS_ENTERED;
1359 EXPORT_SYMBOL(lu_context_enter);
1362 * Called after exiting from \a ctx
1364 void lu_context_exit(struct lu_context *ctx)
1368 LINVRNT(ctx->lc_state == LCS_ENTERED);
1369 ctx->lc_state = LCS_LEFT;
1370 if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1371 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1372 if (ctx->lc_value[i] != NULL) {
1373 struct lu_context_key *key;
1376 LASSERT(key != NULL);
1377 if (key->lct_exit != NULL)
1379 key, ctx->lc_value[i]);
1384 EXPORT_SYMBOL(lu_context_exit);
1387 * Allocate for context all missing keys that were registered after context creation.
1390 int lu_context_refill(struct lu_context *ctx)
1392 LINVRNT(ctx->lc_value != NULL);
1393 return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx);
1395 EXPORT_SYMBOL(lu_context_refill);
1397 int lu_env_init(struct lu_env *env, __u32 tags)
1402 result = lu_context_init(&env->le_ctx, tags);
1403 if (likely(result == 0))
1404 lu_context_enter(&env->le_ctx);
1407 EXPORT_SYMBOL(lu_env_init);
1409 void lu_env_fini(struct lu_env *env)
1411 lu_context_exit(&env->le_ctx);
1412 lu_context_fini(&env->le_ctx);
1415 EXPORT_SYMBOL(lu_env_fini);
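/*
 * Illustrative sketch (hypothetical caller, excluded from the build): the
 * usual lu_env lifetime around a unit of work.  lu_env_init() both
 * initializes and enters env->le_ctx, so the pair below is all a caller
 * needs; the tag chosen here is just an example.
 */
#if 0
static int example_do_work(void)
{
        struct lu_env env;
        int rc;

        rc = lu_env_init(&env, LCT_MD_THREAD);
        if (rc != 0)
                return rc;
        /* ... pass &env to lu_object_find(), lu_object_put(), etc. ... */
        lu_env_fini(&env);
        return rc;
}
#endif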
1417 int lu_env_refill(struct lu_env *env)
1421 result = lu_context_refill(&env->le_ctx);
1422 if (result == 0 && env->le_ses != NULL)
1423 result = lu_context_refill(env->le_ses);
1426 EXPORT_SYMBOL(lu_env_refill);
1428 static struct cfs_shrinker *lu_site_shrinker = NULL;
1431 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1434 struct lu_site *tmp;
1437 CFS_LIST_HEAD(splice);
1440 if (!(gfp_mask & __GFP_FS))
1442 CDEBUG(D_INODE, "Shrink %d objects\n", nr);
1445 cfs_down(&lu_sites_guard);
1446 cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1448 remain = lu_site_purge(&lu_shrink_env, s, remain);
1450 * Move the just-shrunk site to the tail of the site list to
1451 * ensure shrinking fairness.
1453 cfs_list_move_tail(&s->ls_linkage, &splice);
1455 cfs_read_lock(&s->ls_guard);
1456 cached += s->ls_total - s->ls_busy;
1457 cfs_read_unlock(&s->ls_guard);
1458 if (nr && remain <= 0)
1461 cfs_list_splice(&splice, lu_sites.prev);
1462 cfs_up(&lu_sites_guard);
1464 cached = (cached / 100) * sysctl_vfs_cache_pressure;
1466 CDEBUG(D_INODE, "%d objects cached\n", cached);
1475 * Environment to be used in debugger, contains all tags.
1477 struct lu_env lu_debugging_env;
1480 * Debugging printer function using printk().
1482 int lu_printk_printer(const struct lu_env *env,
1483 void *unused, const char *format, ...)
1487 va_start(args, format);
1488 vprintk(format, args);
1493 void lu_debugging_setup(void)
1495 lu_env_init(&lu_debugging_env, ~0);
1498 void lu_context_keys_dump(void)
1502 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1503 struct lu_context_key *key;
1507 CERROR("[%i]: %p %x (%p,%p,%p) %i %i \"%s\"@%p\n",
1508 i, key, key->lct_tags,
1509 key->lct_init, key->lct_fini, key->lct_exit,
1510 key->lct_index, cfs_atomic_read(&key->lct_used),
1511 key->lct_owner ? key->lct_owner->name : "",
1513 lu_ref_print(&key->lct_reference);
1517 EXPORT_SYMBOL(lu_context_keys_dump);
1518 #else /* !__KERNEL__ */
1519 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1523 #endif /* __KERNEL__ */
1525 int cl_global_init(void);
1526 void cl_global_fini(void);
1527 int lu_ref_global_init(void);
1528 void lu_ref_global_fini(void);
1530 int dt_global_init(void);
1531 void dt_global_fini(void);
1533 int llo_global_init(void);
1534 void llo_global_fini(void);
1537 * Initialization of global lu_* data.
1539 int lu_global_init(void)
1543 CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys);
1545 result = lu_ref_global_init();
1549 LU_CONTEXT_KEY_INIT(&lu_global_key);
1550 result = lu_context_key_register(&lu_global_key);
1554 * At this level, we don't know what tags are needed, so allocate them
1555 * conservatively. This should not be too bad, because this
1556 * environment is global.
1558 cfs_down(&lu_sites_guard);
1559 result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1560 cfs_up(&lu_sites_guard);
1565 * seeks estimation: 3 seeks to read a record from oi, one to read
1566 * inode, one for ea. Unfortunately, setting this to a high value results in
1567 * the lu_object/inode cache consuming all the memory.
1569 lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
1570 if (lu_site_shrinker == NULL)
1573 result = lu_time_global_init();
1578 result = dt_global_init();
1582 result = llo_global_init();
1586 result = cl_global_init();
1593 * Dual to lu_global_init().
1595 void lu_global_fini(void)
1602 lu_time_global_fini();
1603 if (lu_site_shrinker != NULL) {
1604 cfs_remove_shrinker(lu_site_shrinker);
1605 lu_site_shrinker = NULL;
1608 lu_context_key_degister(&lu_global_key);
1611 * Tear shrinker environment down _after_ de-registering
1612 * lu_global_key, because the latter has a value in the former.
1614 cfs_down(&lu_sites_guard);
1615 lu_env_fini(&lu_shrink_env);
1616 cfs_up(&lu_sites_guard);
1618 lu_ref_global_fini();
1621 struct lu_buf LU_BUF_NULL = {
1625 EXPORT_SYMBOL(LU_BUF_NULL);
1628 * Output site statistical counters into a buffer. Suitable for
1629 * lprocfs_rd_*()-style functions.
1631 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
1637 * How many hash buckets are non-empty? Don't bother with locks: it's
1638 * an estimate anyway.
1640 for (i = 0, populated = 0; i < s->ls_hash_size; i++)
1641 populated += !cfs_hlist_empty(&s->ls_hash[i]);
1643 return snprintf(page, count, "%d %d %d/%d %d %d %d %d %d %d %d\n",
1648 s->ls_stats.s_created,
1649 s->ls_stats.s_cache_hit,
1650 s->ls_stats.s_cache_miss,
1651 s->ls_stats.s_cache_check,
1652 s->ls_stats.s_cache_race,
1653 s->ls_stats.s_cache_death_race,
1654 s->ls_stats.s_lru_purged);
1656 EXPORT_SYMBOL(lu_site_stats_print);
1658 const char *lu_time_names[LU_TIME_NR] = {
1659 [LU_TIME_FIND_LOOKUP] = "find_lookup",
1660 [LU_TIME_FIND_ALLOC] = "find_alloc",
1661 [LU_TIME_FIND_INSERT] = "find_insert"
1663 EXPORT_SYMBOL(lu_time_names);
1666 * Helper function to initialize a number of kmem slab caches at once.
1668 int lu_kmem_init(struct lu_kmem_descr *caches)
1672 for (result = 0; caches->ckd_cache != NULL; ++caches) {
1673 *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name,
1676 if (*caches->ckd_cache == NULL) {
1683 EXPORT_SYMBOL(lu_kmem_init);
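/*
 * Illustrative sketch (hypothetical descriptors, excluded from the build): a
 * cache table for lu_kmem_init()/lu_kmem_fini().  The ckd_cache/ckd_name
 * fields are the ones used above; ckd_size and "struct foo_object" are
 * assumptions for the sake of the example.  The table ends with a NULL
 * ckd_cache entry, which is what both loops test for.
 */
#if 0
static cfs_mem_cache_t *foo_object_kmem;

static struct lu_kmem_descr foo_caches[] = {
        {
                .ckd_cache = &foo_object_kmem,
                .ckd_name  = "foo_object_kmem",
                .ckd_size  = sizeof(struct foo_object)
        },
        {
                .ckd_cache = NULL
        }
};

/* module init: lu_kmem_init(foo_caches);  module exit: lu_kmem_fini(foo_caches) */
#endif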
1686 * Helper function to finalize a number of kmem slab caches at once. Dual to
1689 void lu_kmem_fini(struct lu_kmem_descr *caches)
1693 for (; caches->ckd_cache != NULL; ++caches) {
1694 if (*caches->ckd_cache != NULL) {
1695 rc = cfs_mem_cache_destroy(*caches->ckd_cache);
1696 LASSERTF(rc == 0, "couldn't destroy %s slab\n",
1698 *caches->ckd_cache = NULL;
1702 EXPORT_SYMBOL(lu_kmem_fini);