/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * These are the only exported functions, they provide some generic
 * infrastructure for managing object devices
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
# include <linux/module.h>
#endif

#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        cfs_hash_bd_t            bd;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {
                        /*
                         * somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When last reference is released, iterate over object
         * layers, and notify them that object is no longer busy.
         */
        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                LASSERT(cfs_list_empty(&top->loh_lru));
                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }

        /*
         * If object is dying (will not be cached), remove it
         * from hash table and LRU.
         *
         * This is done with hash table and LRU lists locked. As the only
         * way to acquire first reference to previously unreferenced
         * object is through hash-table lookup (lu_object_find()),
         * or LRU scanning (lu_site_purge()), that are done under hash-table
         * and LRU lock, no race with concurrent object lookup is possible
         * and we can safely destroy object below.
         */
        cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * Object was already removed from hash and lru above, can
         * kill it.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
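/*
 * Usage sketch (illustrative, not part of the original file): references
 * obtained through lu_object_find() are dropped with lu_object_put(); the
 * last put either parks the object on the bucket LRU or, if the header was
 * marked dying, frees it. "use_object" is a hypothetical caller-side helper:
 *
 *	obj = lu_object_find(env, dev, fid, NULL);
 *	if (!IS_ERR(obj)) {
 *		use_object(obj);
 *		lu_object_put(env, obj);
 *	}
 */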
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        cfs_list_t       *layers;
        int               clean;
        int               result;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                cfs_list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site          *site;
        struct lu_object        *scan;
        cfs_list_t              *layers;
        cfs_list_t               splice;

        site = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        cfs_list_splice_init(layers, &splice);
        while (!cfs_list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                cfs_list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd, bd2;
        cfs_list_t dispose;
        int did_sth, start, count, bnr, i;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        cfs_list_move(&h->loh_lru, &dispose);
                        did_sth = 1;
                        if (nr != ~0 && --nr == 0)
                                break;
                        if (count > 0 && --count == 0)
                                break;
                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!cfs_list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        cfs_list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }
                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
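/*
 * Usage sketch (illustrative): lu_stack_fini() below empties the whole cache
 * by passing ~0 as the count, while a shrinker frees a bounded number of
 * cold objects and gets back the count it could not free:
 *
 *	lu_site_purge(env, site, ~0);
 *	remain = lu_site_purge(env, site, 128);
 */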
/**
 * Code below has to jump through certain hoops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into the libcfs_debug_msg() interface that assumes that each
 * message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 *
 * XXX overflow is not handled correctly.
 */
struct lu_cdebug_data {
        /** temporary buffer */
        char lck_area[LU_CDEBUG_LINE];
};
/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
                    LCT_MG_THREAD | LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct libcfs_debug_msg_data *msgdata = cookie;
        struct lu_cdebug_data        *key;
        int      used;
        int      complete;
        va_list  args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
                        libcfs_debug_msg(msgdata, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
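/*
 * Illustration (hypothetical arguments): lu_cdebug_printer() accumulates
 * chunks in the per-context lu_global_key buffer and emits a single debug
 * message only when a chunk ends with '\n':
 *
 *	lu_cdebug_printer(env, msgdata, "header@%p", hdr);    buffered
 *	lu_cdebug_printer(env, msgdata, " flags %#lx", f);    appended
 *	lu_cdebug_printer(env, msgdata, "\n");                line flushed
 */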
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
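/*
 * Usage sketch (illustrative): this mirrors how lu_stack_fini() below dumps
 * leaked objects through the debug log:
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
 *
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 */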
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       cfs_waitlink_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        cfs_hlist_node_t        *hnode;
        __u64 ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return NULL;

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_lookup_intent is a somewhat "internal" function
         * of cfs_hash, but we don't want refcount on object right now */
        hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return NULL;
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                cfs_list_del_init(&h->loh_lru);
                return lu_object_top(h);
        }

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are eventually
         * drained), and moreover, lookup has to wait until object is freed.
         */
        cfs_atomic_dec(&h->loh_ref);

        cfs_waitlink_init(waiter);
        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
        cfs_set_current_state(CFS_TASK_UNINT);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}
/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
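/*
 * Usage sketch (illustrative): when the caller knows the object cannot be in
 * cache yet (e.g. during object creation), passing LOC_F_NEW in the
 * configuration lets lu_object_find_try() skip the first lookup:
 *
 *	struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
 *
 *	obj = lu_object_find(env, dev, fid, &conf);
 */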
static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object        *o;
        cfs_hash_t              *hs;
        cfs_hash_bd_t            bd;
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
        return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            cfs_waitlink_t *waiter)
{
        struct lu_object *o;
        struct lu_object *shadow;
        struct lu_site   *s;
        cfs_hash_t       *hs;
        cfs_hash_bd_t     bd;
        __u64             version = 0;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *
         * For the "LOC_F_NEW" case, we are sure the object is newly
         * established. It is unnecessary to perform
         * lookup-alloc-lookup-insert; instead, just alloc and insert
         * directly.
         *
         * If dying object is found during index search, add @waiter to the
         * site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        s  = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(shadow == NULL)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        }

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);
        return shadow;
}
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object        *obj;
        cfs_waitlink_t           wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL)
                        lu_object_put(env, top);
        } else
                obj = top;
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result;

        CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
        result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        cfs_list_del_init(&ldt->ldt_linkage);
        ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
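/*
 * Usage sketch (illustrative; "foo_device_type" is a hypothetical name): a
 * layer registers its device type at module load and unregisters it at
 * unload:
 *
 *	rc = lu_device_type_init(&foo_device_type);
 *	if (rc != 0)
 *		return rc;
 *	...
 *	lu_device_type_fini(&foo_device_type);
 */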
void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);
/**
 * Global list of all sites on this node
 */
static CFS_LIST_HEAD(lu_sites);
static CFS_DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;
struct lu_site_print_arg {
        struct lu_env *lsp_env;
        void          *lsp_cookie;
        lu_printer_t   lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header  *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!cfs_list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}
/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");
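/*
 * Usage note (illustrative): because the parameter is created with mode
 * 0644, it can be set at module load time or tuned later at runtime, e.g.:
 *
 *	modprobe obdclass lu_cache_percent=30
 *	echo 30 > /sys/module/obdclass/parameters/lu_cache_percent
 *
 * "obdclass" as the module name is an assumption based on this file's
 * location (lustre/obdclass); out-of-range values fall back to the default
 * in lu_htable_order() below.
 */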
/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrary) taken as 1K (together with inode).
         */
        cache_size = cfs_num_physpages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
                cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
#endif

        /* clear off unreasonable cache setting. */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
                (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
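/*
 * Worked example (illustrative): on a hypothetical node with 4 GB of RAM and
 * 4 KB pages, cfs_num_physpages is 1048576. With the default
 * lu_cache_percent of 20 and 1 KB per object, this gives
 * 1048576 / 100 * 20 * (4096 / 1024) = 838800 cacheable objects, and the
 * loop above returns bits = 20, since 1 << 20 = 1048576 is the first power
 * of two not below that count.
 */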
static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
{
        struct lu_fid *fid = (struct lu_fid *)key;
        __u32          hash;

        hash = fid_flatten32(fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        hash = cfs_hash_long(hash, hs->hs_bkt_bits);

        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

        return hash & mask;
}
static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t            bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LBUG(); /* we should never call it */
}
cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash       = lu_obj_hop_hash,
        .hs_key        = lu_obj_hop_key,
        .hs_keycmp     = lu_obj_hop_keycmp,
        .hs_object     = lu_obj_hop_object,
        .hs_get        = lu_obj_hop_get,
        .hs_put_locked = lu_obj_hop_put_locked,
};
/**
 * Initialize site \a s, with \a top as the top level device.
 */
#define LU_SITE_BITS_MIN         12
#define LU_SITE_BITS_MAX         24
/**
 * 2^8 = 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - too many buckets lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS         8
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        char name[16];
        int bits;
        int i;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
                cfs_waitq_init(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
        cfs_spin_lock_init(&s->ls_ld_lock);

        cfs_spin_lock(&s->ls_ld_lock);
        cfs_list_add(&top->ld_linkage, &s->ls_ld_linkage);
        cfs_spin_unlock(&s->ls_ld_lock);

        return 0;
}
EXPORT_SYMBOL(lu_site_init);
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        cfs_mutex_lock(&lu_sites_guard);
        cfs_list_del_init(&s->ls_linkage);
        cfs_mutex_unlock(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);
/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;

        cfs_mutex_lock(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                cfs_list_add(&s->ls_linkage, &lu_sites);
        cfs_mutex_unlock(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
        cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
        cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        cfs_atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        CFS_INIT_LIST_HEAD(&d->ld_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);
/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev = d;
        lu_device_get(d);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);
/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(cfs_list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference,
                              o->lo_dev_ref, "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);
/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of the device
 * of \a before.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        cfs_atomic_set(&h->loh_ref, 1);
        CFS_INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(cfs_list_empty(&h->loh_layers));
        LASSERT(cfs_list_empty(&h->loh_lru));
        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site   *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        if (!cfs_hash_is_empty(site->ls_obj_hash)) {
                /*
                 * Uh-oh, objects still exist.
                 */
                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);

                lu_site_print(env, site, &msgdata, lu_cdebug_printer);
        }

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 32
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;
/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        cfs_spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        cfs_atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        cfs_spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(cfs_atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                cfs_atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(cfs_module_refcount(key->lct_owner) > 0);
                        cfs_module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        ++key_set_version;
        cfs_spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
        cfs_spin_unlock(&lu_keys_guard);

        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
        struct lu_context_key *key = k;
        va_list args;
        int result;

        va_start(args, k);
        do {
                result = lu_context_key_register(key);
                if (result)
                        break;
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);
        va_end(args);
        if (result != 0) {
                /* deregister the keys registered so far */
                va_start(args, k);
                while (k != key) {
                        lu_context_key_degister(k);
                        k = va_arg(args, struct lu_context_key *);
                }
                va_end(args);
        }
        return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
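/*
 * Usage sketch (illustrative; the "foo" names are hypothetical): a module
 * usually generates its key constructor/destructor with LU_KEY_INIT_FINI(),
 * initializes the keys with LU_CONTEXT_KEY_INIT() and registers them in one
 * NULL-terminated call:
 *
 *	LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *
 *	struct lu_context_key foo_thread_key = {
 *		.lct_tags = LCT_MD_THREAD,
 *		.lct_init = foo_key_init,
 *		.lct_fini = foo_key_fini
 *	};
 *
 *	LU_CONTEXT_KEY_INIT(&foo_thread_key);
 *	rc = lu_context_key_register_many(&foo_thread_key, NULL);
 */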
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_quiesce(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         const struct lu_context_key *key)
{
        LINVRNT(ctx->lc_state == LCS_ENTERED);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
        LASSERT(lu_keys[key->lct_index] == key);
        return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);

/**
 * List of remembered contexts. XXX document me.
 */
static CFS_LIST_HEAD(lu_context_remembered);
/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
        struct lu_context *ctx;
        extern unsigned cl_env_cache_purge(unsigned nr);

        if (!(key->lct_tags & LCT_QUIESCENT)) {
                /*
                 * XXX layering violation.
                 */
                cl_env_cache_purge(~0);
                key->lct_tags |= LCT_QUIESCENT;
                /*
                 * XXX memory barrier has to go here.
                 */
                cfs_spin_lock(&lu_keys_guard);
                cfs_list_for_each_entry(ctx, &lu_context_remembered,
                                        lc_remember)
                        key_fini(ctx, key->lct_index);
                cfs_spin_unlock(&lu_keys_guard);
        }
}
EXPORT_SYMBOL(lu_context_key_quiesce);
void lu_context_key_revive(struct lu_context_key *key)
{
        key->lct_tags &= ~LCT_QUIESCENT;
        ++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
        int i;

        cfs_spin_lock(&lu_keys_guard);
        if (ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                        key_fini(ctx, i);
                OBD_FREE(ctx->lc_value,
                         ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
                ctx->lc_value = NULL;
        }
        cfs_spin_unlock(&lu_keys_guard);
}
static int keys_fill(struct lu_context *ctx)
{
        int i;

        LINVRNT(ctx->lc_value != NULL);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (ctx->lc_value[i] == NULL && key != NULL &&
                    (key->lct_tags & ctx->lc_tags) &&
                    /*
                     * Don't create values for a LCT_QUIESCENT key, as this
                     * will pin module owning a key.
                     */
                    !(key->lct_tags & LCT_QUIESCENT)) {
                        void *value;

                        LINVRNT(key->lct_init != NULL);
                        LINVRNT(key->lct_index == i);

                        value = key->lct_init(ctx, key);
                        if (unlikely(IS_ERR(value)))
                                return PTR_ERR(value);

                        LASSERT(key->lct_owner != NULL);
                        if (!(ctx->lc_tags & LCT_NOREF))
                                cfs_try_module_get(key->lct_owner);
                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
                        cfs_atomic_inc(&key->lct_used);
                        /*
                         * This is the only place in the code, where an
                         * element of ctx->lc_value[] array is set to non-NULL
                         * value.
                         */
                        ctx->lc_value[i] = value;
                        if (key->lct_exit != NULL)
                                ctx->lc_tags |= LCT_HAS_EXIT;
                }
                ctx->lc_version = key_set_version;
        }
        return 0;
}
static int keys_init(struct lu_context *ctx)
{
        int result;

        OBD_ALLOC(ctx->lc_value,
                  ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        if (likely(ctx->lc_value != NULL))
                result = keys_fill(ctx);
        else
                result = -ENOMEM;
        return result;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
        memset(ctx, 0, sizeof *ctx);
        ctx->lc_state = LCS_INITIALIZED;
        ctx->lc_tags = tags;
        if (tags & LCT_REMEMBER) {
                cfs_spin_lock(&lu_keys_guard);
                cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
                cfs_spin_unlock(&lu_keys_guard);
        } else
                CFS_INIT_LIST_HEAD(&ctx->lc_remember);
        return keys_init(ctx);
}
EXPORT_SYMBOL(lu_context_init);
/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_FINALIZED;
        keys_fini(ctx);
        cfs_spin_lock(&lu_keys_guard);
        cfs_list_del_init(&ctx->lc_remember);
        cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_fini);
/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
        int i;

        LINVRNT(ctx->lc_state == LCS_ENTERED);
        ctx->lc_state = LCS_LEFT;
        if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;

                                key = lu_keys[i];
                                LASSERT(key != NULL);
                                if (key->lct_exit != NULL)
                                        key->lct_exit(ctx,
                                                      key, ctx->lc_value[i]);
                        }
                }
        }
}
EXPORT_SYMBOL(lu_context_exit);
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
        return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on the client side,
 * specifically for the echo device client; for other stacks (like ptlrpc
 * threads), contexts are predefined when the lu_device types are
 * registered, during the module probe phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
        cfs_spin_lock(&lu_keys_guard);
        lu_context_tags_default |= tags;
        key_set_version++;
        cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
        cfs_spin_lock(&lu_keys_guard);
        lu_context_tags_default &= ~tags;
        key_set_version++;
        cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
        cfs_spin_lock(&lu_keys_guard);
        lu_session_tags_default |= tags;
        key_set_version++;
        cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
        cfs_spin_lock(&lu_keys_guard);
        lu_session_tags_default &= ~tags;
        key_set_version++;
        cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
int lu_env_init(struct lu_env *env, __u32 tags)
{
        int result;

        env->le_ses = NULL;
        result = lu_context_init(&env->le_ctx, tags);
        if (likely(result == 0))
                lu_context_enter(&env->le_ctx);
        return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
        lu_context_exit(&env->le_ctx);
        lu_context_fini(&env->le_ctx);
        env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
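/*
 * Usage sketch (illustrative; the tag choice is an example only): a
 * short-lived environment wrapped around a call into the lu stack, where
 * "do_work" is a hypothetical caller-side function:
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_DT_THREAD);
 *	if (rc == 0) {
 *		rc = do_work(&env);
 *		lu_env_fini(&env);
 *	}
 */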
int lu_env_refill(struct lu_env *env)
{
        int result;

        result = lu_context_refill(&env->le_ctx);
        if (result == 0 && env->le_ses != NULL)
                result = lu_context_refill(env->le_ses);
        return result;
}
EXPORT_SYMBOL(lu_env_refill);
/**
 * Currently, this API is used only by the echo client, because the echo
 * client and the normal Lustre client share the same cl_env cache. The echo
 * client therefore needs to refresh the env context after getting one from
 * the cache, especially when the normal client and the echo client co-exist
 * on the same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
                          __u32 stags)
{
        int result;

        if ((env->le_ctx.lc_tags & ctags) != ctags) {
                env->le_ctx.lc_version = 0;
                env->le_ctx.lc_tags |= ctags;
        }

        if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
                env->le_ses->lc_version = 0;
                env->le_ses->lc_tags |= stags;
        }

        result = lu_env_refill(env);
        return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
static struct cfs_shrinker *lu_site_shrinker = NULL;

typedef struct lu_site_stats {
        unsigned        lss_populated;
        unsigned        lss_max_search;
        unsigned        lss_total;
        unsigned        lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(cfs_hash_t *hs,
                              lu_site_stats_t *stats, int populated)
{
        cfs_hash_bd_t bd;
        int           i;

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hlist_head_t        *hhead;

                cfs_hash_bd_lock(hs, &bd, 1);
                stats->lss_busy += bkt->lsb_busy;
                stats->lss_total += cfs_hash_bd_count_get(&bd);
                stats->lss_max_search = max((int)stats->lss_max_search,
                                            cfs_hash_bd_depmax_get(&bd));
                if (!populated) {
                        cfs_hash_bd_unlock(hs, &bd, 1);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        if (!cfs_hlist_empty(hhead))
                                stats->lss_populated++;
                }
                cfs_hash_bd_unlock(hs, &bd, 1);
        }
}
#ifdef __KERNEL__

static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        lu_site_stats_t stats;
        struct lu_site *s;
        struct lu_site *tmp;
        int cached = 0;
        int remain = shrink_param(sc, nr_to_scan);
        CFS_LIST_HEAD(splice);

        if (remain != 0) {
                if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
                        return -1;
                CDEBUG(D_INODE, "Shrink %d objects\n", remain);
        }

        cfs_mutex_lock(&lu_sites_guard);
        cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                if (shrink_param(sc, nr_to_scan) != 0) {
                        remain = lu_site_purge(&lu_shrink_env, s, remain);
                        /*
                         * Move just shrunk site to the tail of site list to
                         * assure shrinking fairness.
                         */
                        cfs_list_move_tail(&s->ls_linkage, &splice);
                }

                memset(&stats, 0, sizeof(stats));
                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                cached += stats.lss_total - stats.lss_busy;
                if (shrink_param(sc, nr_to_scan) && remain <= 0)
                        break;
        }
        cfs_list_splice(&splice, lu_sites.prev);
        cfs_mutex_unlock(&lu_sites_guard);

        cached = (cached / 100) * sysctl_vfs_cache_pressure;
        if (shrink_param(sc, nr_to_scan) == 0)
                CDEBUG(D_INODE, "%d objects cached\n", cached);
        return cached;
}
/**
 * Environment to be used in debugger, contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
                      void *unused, const char *format, ...)
{
        va_list args;

        va_start(args, format);
        vprintk(format, args);
        va_end(args);
        return 0;
}

void lu_debugging_setup(void)
{
        lu_env_init(&lu_debugging_env, ~0);
}

void lu_context_keys_dump(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (key != NULL) {
                        CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
                               i, key, key->lct_tags,
                               key->lct_init, key->lct_fini, key->lct_exit,
                               key->lct_index, cfs_atomic_read(&key->lct_used),
                               key->lct_owner ? key->lct_owner->name : "",
                               key->lct_owner);
                        lu_ref_print(&key->lct_reference);
                }
        }
}
EXPORT_SYMBOL(lu_context_keys_dump);

#else  /* !__KERNEL__ */

static int lu_cache_shrink(int nr, unsigned int gfp_mask)
{
        return 0;
}

#endif /* __KERNEL__ */
int  cl_global_init(void);
void cl_global_fini(void);
int  lu_ref_global_init(void);
void lu_ref_global_fini(void);

int dt_global_init(void);
void dt_global_fini(void);

int llo_global_init(void);
void llo_global_fini(void);
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
        int result;

        CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

        result = lu_ref_global_init();
        if (result != 0)
                return result;

        LU_CONTEXT_KEY_INIT(&lu_global_key);
        result = lu_context_key_register(&lu_global_key);
        if (result != 0)
                return result;
        /*
         * At this level, we don't know what tags are needed, so allocate them
         * conservatively. This should not be too bad, because this
         * environment is global.
         */
        cfs_mutex_lock(&lu_sites_guard);
        result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
        cfs_mutex_unlock(&lu_sites_guard);
        if (result != 0)
                return result;
        /*
         * seeks estimation: 3 seeks to read a record from oi, one to read
         * inode, one for ea. Unfortunately setting this high value results in
         * lu_object/inode cache consuming all the memory.
         */
        lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
        if (lu_site_shrinker == NULL)
                return -ENOMEM;

        result = lu_time_global_init();
        if (result == 0)
                result = dt_global_init();
        if (result == 0)
                result = llo_global_init();
        if (result == 0)
                result = cl_global_init();
        return result;
}
/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
        cl_global_fini();
        llo_global_fini();
        dt_global_fini();
        lu_time_global_fini();
        if (lu_site_shrinker != NULL) {
                cfs_remove_shrinker(lu_site_shrinker);
                lu_site_shrinker = NULL;
        }

        lu_context_key_degister(&lu_global_key);

        /*
         * Tear shrinker environment down _after_ de-registering
         * lu_global_key, because the latter has a value in the former.
         */
        cfs_mutex_lock(&lu_sites_guard);
        lu_env_fini(&lu_shrink_env);
        cfs_mutex_unlock(&lu_sites_guard);

        lu_ref_global_fini();
}
struct lu_buf LU_BUF_NULL = {
        .lb_buf = NULL,
        .lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);

static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
        struct lprocfs_counter ret;

        lprocfs_stats_collect(stats, idx, &ret);
        return (__u32)ret.lc_count;
}
/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, char *page, int count)
{
        lu_site_stats_t stats;

        memset(&stats, 0, sizeof(stats));
        lu_site_stats_get(s->ls_obj_hash, &stats, 1);

        return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
                        stats.lss_busy,
                        stats.lss_total,
                        stats.lss_populated,
                        CFS_HASH_NHLIST(s->ls_obj_hash),
                        stats.lss_max_search,
                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
const char *lu_time_names[LU_TIME_NR] = {
        [LU_TIME_FIND_LOOKUP] = "find_lookup",
        [LU_TIME_FIND_ALLOC]  = "find_alloc",
        [LU_TIME_FIND_INSERT] = "find_insert"
};
EXPORT_SYMBOL(lu_time_names);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
        int result;
        struct lu_kmem_descr *iter = caches;

        for (result = 0; iter->ckd_cache != NULL; ++iter) {
                *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
                                                        iter->ckd_size,
                                                        0, 0);
                if (*iter->ckd_cache == NULL) {
                        result = -ENOMEM;
                        /* free all previously allocated caches */
                        lu_kmem_fini(caches);
                        break;
                }
        }
        return result;
}
EXPORT_SYMBOL(lu_kmem_init);
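/*
 * Usage sketch (illustrative; the cache pointer, name and size are
 * hypothetical): a module describes its slabs with a NULL-terminated array
 * and initializes them in one call:
 *
 *	static cfs_mem_cache_t *foo_object_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_object_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);
 *	...
 *	lu_kmem_fini(foo_caches);
 */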
/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
        int rc;

        for (; caches->ckd_cache != NULL; ++caches) {
                if (*caches->ckd_cache != NULL) {
                        rc = cfs_mem_cache_destroy(*caches->ckd_cache);
                        LASSERTF(rc == 0, "couldn't destroy %s slab\n",
                                 caches->ckd_name);
                        *caches->ckd_cache = NULL;
                }
        }
}
EXPORT_SYMBOL(lu_kmem_fini);